code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
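# Illustrative sketch (not part of the conversion): the LAVIS ViT stores separate
# q/v biases and uses no k bias, so the fused qkv bias is [q_bias, zeros, v_bias].
# Shapes below are made up for demonstration.
def _qkv_bias_demo():
    q_bias = torch.ones(4)
    v_bias = torch.full((4,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias), v_bias))
    assert qkv_bias.shape == (12,)
    return qkv_bias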
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)

    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
a__ : int = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
a__ : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
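# Example invocation (hypothetical local paths), assuming the patched LAVIS fork
# mentioned at the top of this script is installed:
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl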
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save a pickled list of example lengths so the dataset can do dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
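# Hedged illustration of the per-row length computation used in get_lens above;
# the pad id and tensor values are made up.
def _non_pad_len_demo():
    import torch

    pad = 0
    input_ids = torch.tensor([[5, 6, 7, pad, pad], [5, pad, pad, pad, pad]])
    assert input_ids.ne(pad).sum(1).tolist() == [3, 1]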
if __name__ == "__main__":
fire.Fire(save_len_file)
| 80 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            # skip Adam optimizer state
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
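# Small shape sketch (assumed dimensions) of the fused-attention split above: a
# qkv kernel of shape [d_model, 3, heads, head_dim] yields three
# [heads * head_dim, d_model] PyTorch weights.
def _qkv_split_demo():
    fused = np.zeros((8, 3, 2, 4), dtype=np.float32)
    q = fused[:, 0, :, :].reshape([8, 2 * 4]).transpose([1, 0]).copy()
    assert q.shape == (8, 8)
    return q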
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 80 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 80 | 1 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-algorithm for the minimum vertex cover problem."""
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v hasn't any adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
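# Side note (illustrative): heapq is a min-heap, so the ranks above are stored
# negated to make the highest-degree vertex pop first.
def _max_heap_demo():
    demo_queue = []
    for vertex, degree in (("a", 3), ("b", 1)):
        heapq.heappush(demo_queue, (-degree, vertex))
    assert heapq.heappop(demo_queue)[1] == "a"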
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XGLM model."""

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
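# Hypothetical usage sketch: the defaults above mirror facebook/xglm-564M.
def _xglm_config_demo():
    config = XGLMConfig()
    assert config.d_model == 1_024 and config.num_layers == 24
    return config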
| 98 | 1 |
import datasets
_snake_case = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_snake_case = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_snake_case = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 300 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
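# Hypothetical subclass sketch showing how the two abstract hooks are meant to be
# used; in practice the `parser` passed in is the subparsers action returned by
# `add_subparsers()`, so `add_parser` is available on it.
class _EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo")
        echo_parser.set_defaults(func=lambda args: _EchoCommand())

    def run(self):
        print("echo")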
| 300 | 1 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert between metric length units using their base-10 exponents."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
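# Quick sanity examples; the values follow directly from the exponent table above.
def _length_conversion_demo():
    assert length_conversion(1, "kilometer", "meter") == 1_000
    assert length_conversion(1, "meter", "kilometer") == 0.001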
if __name__ == "__main__":
from doctest import testmod
testmod()
| 207 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a PyTorch BertModel into a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
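# Example invocation (hypothetical paths):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt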
if __name__ == "__main__":
main()
| 207 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 160 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculate power, voltage or current from the other two values, using the
    relation power = voltage * current. Exactly one argument must be 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
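# Worked examples of the P = V * I rearrangement above:
def _electric_power_demo():
    assert electric_power(voltage=0, current=2, power=5).value == 2.5
    assert electric_power(voltage=2, current=2, power=0).value == 4.0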
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
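# Hypothetical usage sketch: align the template with a dataset's features so
# the audio column keeps its sampling rate.
def _align_demo():
    feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    return AutomaticSpeechRecognition().align_with_features(feats)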
| 213 | """simple docstring"""
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 213 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 2 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 2 | 1 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar everywhere except on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
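# Hypothetical usage: behaves like `tqdm.auto.tqdm`, but the progress bar is
# silenced on every process except the local main one.
#
#   from accelerate.utils import tqdm
#   for batch in tqdm(dataloader):
#       ...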
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 368 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
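if __name__ == "__main__":
    # Worked example (uses the entry point reconstructed above): exactly three
    # subsets of distinct squares sum to 50, namely 1 + 49, 9 + 16 + 25,
    # and 1 + 4 + 9 + 36.
    assert solve(50, 2) == 3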
| 176 | 0 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # backend names use underscores, pip package names use dashes
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
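# Sketch of what the test above guards: each optional backend referenced by a
# dummy object must be spelled exactly like its entry in the pip deps table.
#
# from diffusers.dependency_versions_table import deps
# assert "torch" in deps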
| 63 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
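# Minimal self-contained sketch of word-level WER via edit distance
# (independent of jiwer); it illustrates WER = (S + D + I) / N from the
# docstring above, with N the number of reference words.
def simple_wer(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # d[i][j] = minimum edits turning the first i reference words
    # into the first j hypothesis words
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(
                d[i - 1][j] + 1,  # deletion
                d[i][j - 1] + 1,  # insertion
                d[i - 1][j - 1] + cost,  # substitution or match
            )
    return d[len(ref)][len(hyp)] / len(ref)


assert simple_wer("this is the reference", "this is the prediction") == 0.25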
| 253 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
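# Usage sketch: a specific checkpoint is selected via the metric config name,
# which must match a key in CHECKPOINT_URLS above (downloaded on first use).
#
# import datasets
# bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
# print(bleurt.compute(predictions=["hello there"], references=["hello there"]))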
| 221 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
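# Example invocation (a sketch; the script and file names are placeholders):
# each line of the "correct" file is "file;class;test;correct_line",
# optionally filtered to pytest-style failure ids listed in --fail_filename.
#
#   python overwrite_expected_slice.py \
#       --correct_filename corrected_slices.txt \
#       --fail_filename failed_tests.txt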
| 221 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    # Method names below are reconstructed; the skip reasons are preserved verbatim.
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
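# Shape note (matches the tester assertions above): "positions" stacks the 8
# structure-module recycling iterations, i.e. (8, batch, seq_len, 14, 3) in
# atom14 format; taking the last slice is the usual way to read out the final
# predicted coordinates (an interpretation, not asserted by the test):
#
# final_positions = position_outputs[-1]   # (batch, seq_len, 14, 3)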
| 296 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
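# Minimal sketch of the reverse-diffusion loop these tests exercise, using the
# public diffusers API; `unet` is a stand-in for any noise-predicting model.
#
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     noise_pred = unet(sample, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample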
| 338 | 0 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below max_number with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
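
# Cross-check sketch (hypothetical helper, not in the original module):
# brute-force the same count for a small bound using the sieve above.
def _brute_force_semiprimes(max_number: int) -> int:
    primes = calculate_prime_numbers(max_number)
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i:]
        if p * q < max_number
    )


assert _brute_force_semiprimes(30) == solution(30) == 10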
if __name__ == "__main__":
print(f"{solution() = }") | 362 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the DDIM denoising process: recover the noise tensor for a generated image."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two noise tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
| 325 | 0 |
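# Usage sketch for the audio-diffusion pipeline reconstructed above (the
# checkpoint name is an assumption; any audio-diffusion checkpoint with a Mel
# processor should work):
#
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
# output = pipe(batch_size=1)
# image, audio = output.images[0], output.audios[0]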
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace and control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # repeatedly apply the lowest-ranked (earliest-learned) merge
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
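# Quick usage sketch of the byte-level BPE above (requires network access):
#
# tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# print(tok.tokenize("Hello world"))   # e.g. ['Hello', 'Ġworld'] ('Ġ' marks a leading space)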
| 195 |
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt (float-based)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using exact integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
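
# Quick sanity check of both implementations (the float-based test can be
# fragile for very large integers; the binary search stays exact):
assert perfect_square(9) and perfect_square_binary_search(9)
assert not perfect_square(10) and not perfect_square_binary_search(10)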
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 33 |
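# Usage sketch: the builder above backs `load_dataset("pandas", ...)`, turning
# pickled DataFrames into Arrow tables (the file name is a placeholder).
#
# import datasets
# ds = datasets.load_dataset("pandas", data_files={"train": "my_frame.pkl"})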
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Return every way `target` can be constructed by concatenating
    (reusable) substrings from `word_bank`.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
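
# Quick check (count only, since result ordering depends on bank order):
# "abcdef" decomposes 4 ways over this bank: abc+def, ab+c+def, abcd+ef, ab+cd+ef.
assert len(all_construct("abcdef", ["ab", "abc", "cd", "def", "abcd", "ef", "c"])) == 4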
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 33 | 1 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
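# Usage sketch (interactive terminal required; class name as reconstructed):
#
# menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
# selected_index = menu.run(default_choice=0)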
| 143 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __snake_case :
__lowerCamelCase = XGLMConfig
__lowerCamelCase = {}
__lowerCamelCase = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=14 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=0.0_2 , ) -> str:
'''simple docstring'''
snake_case__ : Any = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : Optional[int] = use_input_mask
snake_case__ : Any = use_labels
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : str = ffn_dim
snake_case__ : Optional[Any] = activation_function
snake_case__ : str = activation_dropout
snake_case__ : int = attention_dropout
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Optional[int] = initializer_range
snake_case__ : List[str] = None
snake_case__ : List[str] = 0
snake_case__ : Optional[int] = 2
snake_case__ : Union[str, Any] = 1
def __a ( self ) -> List[str]:
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = self.get_config()
snake_case__ : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __a ( self ) -> Any:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCamelCase , )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Any = self.prepare_config_and_inputs()
(
snake_case__ ,
snake_case__ ,
snake_case__ ,
snake_case__ ,
) = config_and_inputs
snake_case__ : Tuple = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = TFXGLMModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , n_embd=37 )
def __a ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = TFXGLMModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __a ( self ) -> Any:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self , __UpperCamelCase=True ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
snake_case__ : List[str] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
snake_case__ : int = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Dict = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
snake_case__ : Any = tokenizer('Today is a nice day and' , return_tensors='tf' )
snake_case__ : Dict = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
snake_case__ : Optional[int] = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , seed=[7, 0] )
snake_case__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : str = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Any = 'left'
# use different length sentences to test batching
snake_case__ : int = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
snake_case__ : Any = tokenizer(__UpperCamelCase , return_tensors='tf' , padding=__UpperCamelCase )
snake_case__ : List[Any] = inputs['input_ids']
snake_case__ : List[str] = model.generate(input_ids=__UpperCamelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
snake_case__ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
snake_case__ : str = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
snake_case__ : Dict = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : List[Any] = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Union[str, Any] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] )
| 143 | 1 |
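The batched-generation test above depends on `tokenizer.padding_side = 'left'`: with a causal LM, right padding would leave pad tokens between a short prompt and its continuation, changing the output. A minimal sketch of the same setup, assuming a generic public checkpoint ("gpt2" is used purely as an illustration, not the model under test):

from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token  # gpt2 ships without a pad token

model = TFAutoModelForCausalLM.from_pretrained("gpt2")
batch = tokenizer(["A much longer prompt than the second one", "Short"], return_tensors="tf", padding=True)
outputs = model.generate(**batch, max_new_tokens=8)  # prompts stay flush with their continuations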
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a_ ( unittest.TestCase ):
@slow
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
UpperCamelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_SCREAMING_SNAKE_CASE ):
return model(**_SCREAMING_SNAKE_CASE )
eval(**_SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
UpperCamelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_SCREAMING_SNAKE_CASE ):
return model(**_SCREAMING_SNAKE_CASE )
eval(**_SCREAMING_SNAKE_CASE ).block_until_ready()
def A__ ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = FlaxAutoModel.from_pretrained("""bert-base""" )
def A__ ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = FlaxAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , revision="""aaaaaa""" )
def A__ ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
UpperCamelCase = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def A__ ( self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE , """Use `from_pt=True` to load this model""" ):
UpperCamelCase = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 183 |
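The jitted-eval tests above follow a common Flax pattern: wrap the model call in `jax.jit` once, then reuse the compiled function. A minimal sketch using the same public `bert-base-cased` checkpoint the tests load:

import jax
from transformers import AutoTokenizer, FlaxBertModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxBertModel.from_pretrained("bert-base-cased")
inputs = tokenizer("Do you support jax jitted function?", return_tensors="jax")

@jax.jit
def forward(**kwargs):
    return model(**kwargs)

# The first call compiles; later calls reuse the cached XLA program.
forward(**inputs).last_hidden_state.block_until_ready()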
'''simple docstring'''
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
SCREAMING_SNAKE_CASE__ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
SCREAMING_SNAKE_CASE__ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , ) -> List[Any]:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCamelCase = np.array([re.sub(_SCREAMING_SNAKE_CASE , """""" , _SCREAMING_SNAKE_CASE ) for x in predictions] )
UpperCamelCase = np.array([re.sub(_SCREAMING_SNAKE_CASE , """""" , _SCREAMING_SNAKE_CASE ) for x in references] )
else:
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
if ignore_case:
UpperCamelCase = np.char.lower(_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.char.lower(_SCREAMING_SNAKE_CASE )
if ignore_punctuation:
UpperCamelCase = string.punctuation.maketrans("""""" , """""" , string.punctuation )
UpperCamelCase = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
if ignore_numbers:
UpperCamelCase = string.digits.maketrans("""""" , """""" , string.digits )
UpperCamelCase = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
UpperCamelCase = predictions == references
return {"exact_match": np.mean(_SCREAMING_SNAKE_CASE ) * 100}
| 183 | 1 |
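The compute method above vectorizes its normalization with numpy, but the pipeline order is the important part: regex stripping first, then case folding, then punctuation removal via a translation table. A plain-Python sketch of the same order (hypothetical helper name, same semantics):

import re
import string

def exact_match_rate(preds, refs, regexes_to_ignore=(), ignore_case=False, ignore_punctuation=False):
    table = str.maketrans("", "", string.punctuation)
    hits = 0
    for pred, ref in zip(preds, refs):
        for rx in regexes_to_ignore:
            pred, ref = re.sub(rx, "", pred), re.sub(rx, "", ref)
        if ignore_case:
            pred, ref = pred.lower(), ref.lower()
        if ignore_punctuation:
            pred, ref = pred.translate(table), ref.translate(table)
        hits += pred == ref
    return 100.0 * hits / len(preds)

assert exact_match_rate(["cat?"], ["the cat"], regexes_to_ignore=["the "], ignore_punctuation=True) == 100.0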
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : int , A_ : Optional[Any] , A_ : List[str]=7 , A_ : List[Any]=3 , A_ : int=1_8 , A_ : List[Any]=3_0 , A_ : Tuple=4_0_0 , A_ : Optional[Any]=True , A_ : int=None , A_ : str=True , A_ : str=None , A_ : List[str]=True , A_ : Optional[Any]=[0.5, 0.5, 0.5] , A_ : Optional[int]=[0.5, 0.5, 0.5] , ):
lowerCAmelCase_ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 1_8}
lowerCAmelCase_ : Dict = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : int = num_channels
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Optional[Any] = min_resolution
lowerCAmelCase_ : List[Any] = max_resolution
lowerCAmelCase_ : Optional[Any] = do_resize
lowerCAmelCase_ : Any = size
lowerCAmelCase_ : Optional[int] = do_center_crop
lowerCAmelCase_ : List[str] = crop_size
lowerCAmelCase_ : Optional[Any] = do_normalize
lowerCAmelCase_ : Tuple = image_mean
lowerCAmelCase_ : Optional[int] = image_std
def UpperCAmelCase__ ( self : List[Any]):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __snake_case ( UpperCamelCase_ ,unittest.TestCase ):
_a = LevitImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : int = LevitImageProcessingTester(self)
@property
def UpperCAmelCase__ ( self : Union[str, Any]):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A_ , '''image_mean'''))
self.assertTrue(hasattr(A_ , '''image_std'''))
self.assertTrue(hasattr(A_ , '''do_normalize'''))
self.assertTrue(hasattr(A_ , '''do_resize'''))
self.assertTrue(hasattr(A_ , '''do_center_crop'''))
self.assertTrue(hasattr(A_ , '''size'''))
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8})
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8})
lowerCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2})
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4})
def UpperCAmelCase__ ( self : Any):
pass
def UpperCAmelCase__ ( self : Optional[int]):
# Initialize image_processing
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_)
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image)
# Test not batched input
lowerCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(A_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : int):
# Initialize image_processing
lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_)
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray)
# Test not batched input
lowerCAmelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(A_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self : Any):
# Initialize image_processing
lowerCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_)
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor)
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Any = image_processing(A_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 103 |
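Each test above asserts the same shape contract: a single image comes back as pixel_values of shape (1, num_channels, crop_height, crop_width), a batch as (batch_size, num_channels, crop_height, crop_width). The numpy equivalent of that check, with made-up dimensions:

import numpy as np

batch_size, num_channels, height, width = 7, 3, 18, 18
images = [np.zeros((num_channels, height, width)) for _ in range(batch_size)]

single = np.expand_dims(images[0], axis=0)  # an unbatched input still gains a batch axis
batched = np.stack(images)
assert single.shape == (1, num_channels, height, width)
assert batched.shape == (batch_size, num_channels, height, width)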
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def UpperCamelCase( __UpperCamelCase : List[str] ):
lowerCAmelCase_ : List[str] = SwinvaConfig()
lowerCAmelCase_ : List[str] = swinva_name.split('''_''' )
lowerCAmelCase_ : str = name_split[1]
if "to" in name_split[3]:
lowerCAmelCase_ : List[Any] = int(name_split[3][-3:] )
else:
lowerCAmelCase_ : List[Any] = int(name_split[3] )
if "to" in name_split[2]:
lowerCAmelCase_ : List[str] = int(name_split[2][-2:] )
else:
lowerCAmelCase_ : int = int(name_split[2][6:] )
if model_size == "tiny":
lowerCAmelCase_ : Any = 96
lowerCAmelCase_ : List[str] = (2, 2, 6, 2)
lowerCAmelCase_ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "small":
lowerCAmelCase_ : List[str] = 96
lowerCAmelCase_ : Any = (2, 2, 18, 2)
lowerCAmelCase_ : Dict = (3, 6, 12, 24)
elif model_size == "base":
lowerCAmelCase_ : Union[str, Any] = 128
lowerCAmelCase_ : List[Any] = (2, 2, 18, 2)
lowerCAmelCase_ : Tuple = (4, 8, 16, 32)
else:
lowerCAmelCase_ : Optional[Any] = 192
lowerCAmelCase_ : List[Any] = (2, 2, 18, 2)
lowerCAmelCase_ : List[Any] = (6, 12, 24, 48)
if "to" in swinva_name:
lowerCAmelCase_ : Union[str, Any] = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
lowerCAmelCase_ : Optional[int] = 21841
lowerCAmelCase_ : Any = '''huggingface/label-files'''
lowerCAmelCase_ : Tuple = '''imagenet-22k-id2label.json'''
lowerCAmelCase_ : Any = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCAmelCase_ : Optional[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase_ : str = idalabel
lowerCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
else:
lowerCAmelCase_ : Optional[int] = 1000
lowerCAmelCase_ : Tuple = '''huggingface/label-files'''
lowerCAmelCase_ : Union[str, Any] = '''imagenet-1k-id2label.json'''
lowerCAmelCase_ : Dict = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCAmelCase_ : int = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase_ : List[str] = idalabel
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = img_size
lowerCAmelCase_ : Dict = num_classes
lowerCAmelCase_ : Dict = embed_dim
lowerCAmelCase_ : Optional[Any] = depths
lowerCAmelCase_ : Optional[int] = num_heads
lowerCAmelCase_ : Dict = window_size
return config
def UpperCamelCase( __UpperCamelCase : List[str] ):
if "patch_embed.proj" in name:
lowerCAmelCase_ : Dict = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCAmelCase_ : List[Any] = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCAmelCase_ : int = '''encoder.''' + name
if "attn.proj" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase_ : Tuple = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase_ : Tuple = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "q_bias" in name:
lowerCAmelCase_ : Tuple = name.replace('''q_bias''' ,'''query.bias''' )
if "k_bias" in name:
lowerCAmelCase_ : Tuple = name.replace('''k_bias''' ,'''key.bias''' )
if "v_bias" in name:
lowerCAmelCase_ : int = name.replace('''v_bias''' ,'''value.bias''' )
if "cpb_mlp" in name:
lowerCAmelCase_ : Any = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' )
if name == "norm.weight":
lowerCAmelCase_ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowerCAmelCase_ : Any = '''layernorm.bias'''
if "head" in name:
lowerCAmelCase_ : int = name.replace('''head''' ,'''classifier''' )
else:
lowerCAmelCase_ : Union[str, Any] = '''swinv2.''' + name
return name
def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ : Optional[int] = orig_state_dict.pop(__UpperCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase_ : Dict = key.split('''.''' )
lowerCAmelCase_ : Any = int(key_split[1] )
lowerCAmelCase_ : Optional[int] = int(key_split[3] )
lowerCAmelCase_ : Dict = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase_ : Optional[Any] = val[:dim, :]
lowerCAmelCase_ : Any = val[dim : dim * 2, :]
lowerCAmelCase_ : List[Any] = val[-dim:, :]
else:
lowerCAmelCase_ : Dict = val[:dim]
lowerCAmelCase_ : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase_ : Dict = val[-dim:]
else:
lowerCAmelCase_ : Optional[Any] = val
return orig_state_dict
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : Dict ):
lowerCAmelCase_ : Optional[Any] = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
lowerCAmelCase_ : List[str] = get_swinva_config(__UpperCamelCase )
lowerCAmelCase_ : Union[str, Any] = SwinvaForImageClassification(__UpperCamelCase )
model.eval()
lowerCAmelCase_ : str = convert_state_dict(timm_model.state_dict() ,__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
lowerCAmelCase_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' ,'''-''' ) ) )
lowerCAmelCase_ : Union[str, Any] = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
lowerCAmelCase_ : Optional[Any] = image_processor(images=__UpperCamelCase ,return_tensors='''pt''' )
lowerCAmelCase_ : List[str] = timm_model(inputs['''pixel_values'''] )
lowerCAmelCase_ : Union[str, Any] = model(**__UpperCamelCase ).logits
assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 )
print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase ,__UpperCamelCase ) ,organization='''nandwalritik''' ,commit_message='''Add model''' ,)
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__ : Optional[Any] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 103 | 1 |
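The convert_state_dict step above splits timm's fused qkv projection into separate query/key/value tensors by slicing along the first dimension. A self-contained sketch of that slicing:

import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # stand-in for a fused "attn.qkv.weight"

query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
# concatenating the slices back reproduces the fused matrix
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)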
"""simple docstring"""
from __future__ import annotations
def __lowercase ( _a , _a , _a , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 |
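The function above solves tau = F / A for whichever of the three quantities is passed as 0. A worked example for the stress branch, with made-up values:

tangential_force = 1000.0  # N
area = 0.05                # m^2
stress = tangential_force / area  # tau = F / A
assert stress == 20000.0   # Pa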
"""simple docstring"""
import os
def __lowercase ( _a ):
snake_case_ : Tuple = len(grid[0] )
snake_case_ : Optional[int] = len(_a )
snake_case_ : Union[str, Any] = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : List[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_a ):
for j in range(n_rows - 3 ):
snake_case_ : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
snake_case_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
snake_case_ : Dict = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
snake_case_ : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
snake_case_ : List[str] = max(
_a , _a , _a , _a )
if max_product > largest:
snake_case_ : str = max_product
return largest
def __lowercase ( ):
snake_case_ : Tuple = []
with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )]
return largest_product(_a )
if __name__ == "__main__":
print(solution())
| 155 | 1 |
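A toy check of the four-in-a-row product scan above, rewritten with descriptive names so it runs standalone: on a 4x4 grid whose main diagonal holds four 5s, the expected maximum is 5**4 = 625.

def max_product_of_four(grid):
    n = len(grid)
    best = 0
    for i in range(n):
        for j in range(n - 3):
            vertical = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horizontal = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            best = max(best, vertical, horizontal)
            if i < n - 3:  # left-to-right (\) diagonal
                best = max(best, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3])
            if i > 2:      # right-to-left (/) diagonal
                best = max(best, grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3])
    return best

grid = [[5 if row == col else 1 for col in range(4)] for row in range(4)]
assert max_product_of_four(grid) == 625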
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def _UpperCamelCase ( __A ) -> typing.Counter[int]:
'''simple docstring'''
UpperCamelCase__ = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__A , max_perimeter + 1 ):
UpperCamelCase__ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__A ):
UpperCamelCase__ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _UpperCamelCase ( __A = 1000 ) -> int:
'''simple docstring'''
UpperCamelCase__ = pythagorean_triple(__A )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 80 |
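A quick sanity check of the counting approach above: (3, 4, 5) is the only integer right triangle with perimeter 12, and the known Project Euler 39 answer for max_perimeter = 1000 is 840. A readable restatement:

from collections import Counter

def count_triplets(max_perimeter):
    triplets = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse) and base + perpendicular + int(hypotenuse) <= max_perimeter:
                triplets[base + perpendicular + int(hypotenuse)] += 1
    return triplets

assert count_triplets(12)[12] == 1                        # only 3-4-5
assert count_triplets(1000).most_common(1)[0][0] == 840   # known answer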
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_ ( a__ ):
__UpperCAmelCase = ['pixel_values']
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , a = True , **a , ):
super().__init__(**a )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase__ = get_size_dict(a , default_to_square=a , param_name="crop_size" )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ = do_convert_rgb
def __a ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
UpperCamelCase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def __a ( self , a , a , a = None , **a , ):
return rescale(a , scale=a , data_format=a , **a )
def __a ( self , a , a , a , a = None , **a , ):
return normalize(a , mean=a , std=a , data_format=a , **a )
def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a , param_name="size" , default_to_square=a )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(a , param_name="crop_size" , default_to_square=a )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase__ = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 80 | 1 |
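The preprocess method above applies a fixed transform order: RGB conversion, resize, center crop, rescale, then normalize. The last two steps reduce to plain array arithmetic; a sketch using the OpenAI CLIP mean/std constants referenced above:

import numpy as np

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)  # assume already resized + cropped

rescaled = image * (1 / 255)                            # do_rescale
mean = np.array([0.48145466, 0.4578275, 0.40821073])    # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])    # OPENAI_CLIP_STD
normalized = (rescaled - mean) / std                    # do_normalize, broadcast over channels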
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase : Optional[int] = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
__UpperCAmelCase : Optional[int] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
__UpperCAmelCase : Any = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
__UpperCAmelCase : List[Any] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293 |
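The module above only materializes its submodule imports on first attribute access, which keeps importing the package cheap. A minimal sketch of the idea; this is an illustration, not the actual _LazyModule implementation:

import importlib

class LazySubmodules:
    def __init__(self, package, import_structure):
        self._package = package
        # invert {submodule: [symbol, ...]} to look up each symbol's home module
        self._symbol_home = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, symbol):
        module = importlib.import_module("." + self._symbol_home[symbol], self._package)
        return getattr(module, symbol)  # the real import happens only now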
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it to a dict of lists
# We do this to allow using this method as a collate_fn function in a PyTorch DataLoader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
if value.dtype is np.dtype(np.floataa ):
__snake_case: str = value.astype(np.floataa )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.intaa )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 293 | 1 |
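The right-padding branch of _pad above reduces to two np.pad calls: extend the attention mask with zeros and the features with the padding value. A 1-D sketch:

import numpy as np

input_values = np.array([0.1, 0.2, 0.3])
max_length, padding_value = 5, 0.0
difference = max_length - len(input_values)

attention_mask = np.pad(np.ones(len(input_values), dtype=np.int32), (0, difference))
padded = np.pad(input_values, (0, difference), "constant", constant_values=padding_value)

assert attention_mask.tolist() == [1, 1, 1, 0, 0]
assert padded.tolist() == [0.1, 0.2, 0.3, 0.0, 0.0]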
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''openai/whisper-base'''
__UpperCamelCase = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__UpperCamelCase = '''transcriber'''
__UpperCamelCase = WhisperProcessor
__UpperCamelCase = WhisperForConditionalGeneration
__UpperCamelCase = ['''audio''']
__UpperCamelCase = ['''text''']
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[str, Any] ):
'''simple docstring'''
return self.pre_processor(snake_case , return_tensors="pt" ).input_features
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Any ):
'''simple docstring'''
return self.model.generate(inputs=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :Tuple ):
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case , skip_special_tokens=snake_case )[0]
| 300 |
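The tool above fills in the three-stage contract of PipelineTool: encode the raw input with the processor, run the model, decode the generated ids back to text. A sketch of how the stages chain (run_tool is a hypothetical helper; in practice, calling the tool instance does this for you):

def run_tool(tool, raw_input):
    features = tool.encode(raw_input)   # pre_processor -> input features tensor
    generated = tool.forward(features)  # model.generate(...)
    return tool.decode(generated)       # batch_decode -> transcribed string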
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
A_ : Tuple = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
A_ : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any:
A_ : Dict = dct.pop(_lowerCAmelCase )
A_ : List[Any] = val
def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
if "handwritten" in checkpoint_url:
A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase )
A_ : Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Optional[Any] = 1024
A_ : Union[str, Any] = 4096
A_ : Union[str, Any] = 24
A_ : List[Any] = 16
A_ : List[str] = 1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Dict = False
A_ : int = "relu"
A_ : Optional[int] = 1024
A_ : Any = True
A_ : List[Any] = False
A_ : Optional[int] = False
# load HuggingFace model
A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
A_ : str = TrOCRForCausalLM(_lowerCAmelCase )
A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"]
A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Dict = state_dict.pop(_lowerCAmelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A_ : List[str] = val
else:
A_ : Optional[Any] = val
# load state dict
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" )
A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )
A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
A_ : Tuple = outputs.logits
A_ : Union[str, Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
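    # Example invocation of this conversion script (the script's filename is not shown
    # in this snippet, so the module name below is an assumption):
    #     python convert_trocr_checkpoint.py \
    #         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
    #         --pytorch_dump_folder_path ./trocr-base-handwritten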
| 300 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys ( s_dict :Optional[Any]) -> List[Any]:
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = R""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_regex , key):
            new_key = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , new_key)
        encoder_decoder_regex = R"""(encoder|decoder)\/"""
        if re.match(encoder_decoder_regex , key):
            groups = re.match(encoder_decoder_regex , new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , new_key)
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , new_key)
                new_key = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , new_key)
    # 2. Convert other classic mappings
    for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
        if old_key in new_key:
            new_key = new_key.replace(old_key , temp_key)
    print(F'''{key} -> {new_key}''')
    s_dict[new_key] = s_dict.pop(key)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""] = s_dict[
            """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
        ].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", F"experts/expert_{idx}/")] = expert_weights[idx]
                print(F'''{key} -> {key.replace("expert/", F"experts/expert_{idx}/")}''')
            s_dict.pop(key)
return s_dict
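# Worked example of the layer-index rewrite performed above (key chosen for illustration):
#     re.sub(R"layers_(\d+)", R"block/\1/layer", "encoder/layers_3/mlp/wi/kernel")
#     -> 'encoder/block/3/layer/mlp/wi/kernel'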
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config ( gin_file :Union[str, Any] , num_experts :str) -> int:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
    with open(gin_file , """r""") as f:
        raw_gin = f.read()
    regex_match = re.findall(R"""(.*) = ([0-9.]*)""" , raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if """.""" in value else int(value)
    activation = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , raw_gin)[0]
    args["""feed_forward_proj"""] = str(activation[1])
    args["""num_experts"""] = num_experts
    config = SwitchTransformersConfig(**args)
return config
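# Worked example of the gin parsing above (file content chosen for illustration):
#     re.findall(R"(.*) = ([0-9.]*)", "NUM_HEADS = 16\nMLP_DIM = 2048")
#     -> [('NUM_HEADS', '16'), ('MLP_DIM', '2048')]
# '16' contains no '.', so it is converted with int(); a value such as '0.1' would use float().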
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path :Tuple , config_file :Optional[Any] , gin_file :List[str]=None , pytorch_dump_path :Any="./" , num_experts :Any=8) -> Any:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''')
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["""target"""]
    flax_params = flatten_dict(flax_params , sep="""/""")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params , sep="""/""")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params)
print(F'''Save PyTorch model to {pytorch_dump_path}''')
pt_model.save_pretrained(_UpperCamelCase)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
            'Path to the T5X checkpoint of the pre-trained SwitchTransformers model. If `config_name` is not'
            ' provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 368 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
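# A minimal sketch of the lazy-import pattern used above (simplified stand-in; the real
# transformers._LazyModule also handles submodule imports and import errors):
#     import importlib, types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for module_name, names in self._import_structure.items():
#                 if attr in names:
#                     return getattr(importlib.import_module(module_name), attr)
#             raise AttributeError(attr)
#
#     lazy = MiniLazyModule("lazy_math", {"math": ["sqrt"]})
#     lazy.sqrt(9.0)  # 3.0 -- 'math' is only imported on this first attribute access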
| 81 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def __A ( voltage :float , current :float , power :float) -> tuple:
    result = namedtuple('''result''' , '''name value''')
if (voltage, current, power).count(0) != 1:
raise ValueError('''Only one argument must be 0''')
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''')
elif voltage == 0:
return result('''voltage''' , power / current)
elif current == 0:
return result('''current''' , power / voltage)
elif power == 0:
return result('''power''' , float(round(abs(voltage * current) , 2)))
else:
raise ValueError('''Exactly one argument must be 0''')
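# Usage sketch (values chosen for illustration): with voltage unknown,
# P = V * I gives V = P / I = 4 / 2 = 2.0.
#     __A(voltage=0, current=2, power=4)  # result(name='voltage', value=2.0)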
if __name__ == "__main__":
import doctest
doctest.testmod() | 160 |
"""simple docstring"""
g = 9.80665
def __A ( fluid_density :float , volume :float , gravity :float = g) -> float:
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''')
if volume < 0:
raise ValueError('''Impossible Object volume''')
if gravity <= 0:
raise ValueError('''Impossible Gravity''')
return fluid_density * gravity * volume
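# Usage sketch (values chosen for illustration): 0.5 m^3 submerged in water
# (1000 kg/m^3) at the default gravity displaces 1000 * 9.80665 * 0.5 = 4903.325 N:
#     __A(fluid_density=1000, volume=0.5)  # 4903.325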
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 160 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
a : Optional[Any] = parent
a : str = batch_size
a : str = seq_length
a : List[str] = is_training
a : List[str] = use_input_mask
a : Tuple = use_token_type_ids
a : str = use_labels
a : List[Any] = vocab_size
a : Any = hidden_size
a : List[str] = num_hidden_layers
a : Optional[int] = num_attention_heads
a : Union[str, Any] = intermediate_size
a : Optional[int] = hidden_act
a : List[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Tuple = max_position_embeddings
a : Union[str, Any] = type_vocab_size
a : List[str] = type_sequence_label_size
a : List[Any] = initializer_range
a : str = num_labels
a : Dict = num_choices
a : str = scope
def lowercase_ ( self : Union[str, Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Dict ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def lowercase_ ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def lowercase_ ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowercase_ ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ = False
lowercase__ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = ()
lowercase__ = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
def lowercase_ ( self : List[str] ):
a : str = EsmModelTester(self )
a : str = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowercase_ ( self : Tuple ):
self.config_tester.run_common_tests()
def lowercase_ ( self : int ):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowercase_ ( self : Optional[int] ):
a : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a : Dict = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowercase_ ( self : str ):
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def lowercase_ ( self : List[Any] ):
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def lowercase_ ( self : Optional[int] ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = EsmModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowercase_ ( self : Any ):
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()[0]
a : str = EsmEmbeddings(config=UpperCamelCase__ )
a : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
a : int = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
a : Tuple = create_position_ids_from_input_ids(UpperCamelCase__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) )
def lowercase_ ( self : str ):
a : List[str] = self.model_tester.prepare_config_and_inputs()[0]
a : str = EsmEmbeddings(config=UpperCamelCase__ )
a : Optional[int] = torch.empty(2 , 4 , 30 )
a : Any = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
a : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
a : Tuple = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) )
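    # A minimal sketch of what the two position-id tests above verify (illustrative,
    # not the library implementation): positions count only non-padding tokens and are
    # offset past padding_idx, e.g. with padding_idx = 1:
    #     input_ids     = [12, 31, 13,  1]
    #     mask          = [ 1,  1,  1,  0]
    #     cumsum * mask = [ 1,  2,  3,  0]
    #     + padding_idx = [ 2,  3,  4,  1]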
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : str ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self : int ):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
@slow
def lowercase_ ( self : str ):
with torch.no_grad():
a : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
a : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
a : Optional[int] = model(UpperCamelCase__ )[0]
a : str = 33
a : Any = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
a : Optional[int] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self : Any ):
with torch.no_grad():
a : str = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
a : List[str] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
a : List[Any] = model(UpperCamelCase__ )[0]
# compare the actual values for a slice.
a : Dict = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) | 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase (TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = GPTSanJapaneseTokenizer
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Dict = {"""do_clean_text""": False, """add_prefix_space""": False}
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
lowercase__ = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
lowercase__ = {'''unk_token''': '''<unk>'''}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCamelCase ) )
def UpperCamelCase__ (self : Optional[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : Any , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def UpperCamelCase__ (self : Tuple , UpperCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ ,lowercase__ = self.get_input_output_texts(UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )
return text, ids
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
# Testing tokenization
lowercase__ = '''こんにちは、世界。 こんばんは、㔺界。'''
lowercase__ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids without special tokens
lowercase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids with special tokens
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
# Testing tokenization
lowercase__ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
lowercase__ = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = tokenizer.decode(UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
@slow
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowercase__ = '''こんにちは、世界。'''
lowercase__ = '''こんばんは、㔺界。😀'''
lowercase__ = '''こんにちは、世界。こんばんは、世界。😀'''
lowercase__ = tokenizer.encode(prefix_text + input_text )
lowercase__ = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
lowercase__ = tokenizer.encode(UpperCamelCase , prefix_text=UpperCamelCase )
lowercase__ = tokenizer.decode(UpperCamelCase )
lowercase__ = tokenizer.decode(UpperCamelCase )
lowercase__ = tokenizer.decode(UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
@slow
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowercase__ = '''こんにちは、世界。'''
lowercase__ = '''こんばんは、㔺界。😀'''
lowercase__ = len(tokenizer.encode(UpperCamelCase ) ) - 2
lowercase__ = len(tokenizer.encode(UpperCamelCase ) ) - 2
lowercase__ = [1] + [0] * (len_prefix + len_text + 1)
lowercase__ = [1] * (len_prefix + len_text + 1) + [0]
lowercase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase__ = tokenizer(prefix_text + input_text ).token_type_ids
lowercase__ = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
lowercase__ = tokenizer(UpperCamelCase , prefix_text=UpperCamelCase ).token_type_ids
self.assertListEqual(UpperCamelCase , UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowercase__ = tokenizer.encode('''あンいワ''' )
lowercase__ = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
lowercase__ = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCamelCase ) , tokenizer.decode(UpperCamelCase ) )
self.assertEqual(tokenizer.decode(UpperCamelCase ) , tokenizer.decode(UpperCamelCase ) )
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
self.assertNotEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowercase__ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
lowercase__ = tokenizer(UpperCamelCase , padding=UpperCamelCase )
lowercase__ = tokenizer.batch_encode_plus(UpperCamelCase , padding=UpperCamelCase )
# fmt: off
lowercase__ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
lowercase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCamelCase )
self.assertListEqual(x_token.token_type_ids , UpperCamelCase )
self.assertListEqual(x_token.attention_mask , UpperCamelCase )
self.assertListEqual(x_token_a.input_ids , UpperCamelCase )
self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase )
self.assertListEqual(x_token_a.attention_mask , UpperCamelCase )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
| 2 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations (n , k ) -> list[list[int]]:
    """simple docstring"""
    result = []
    create_all_state(1 , n , k , [] , result )
    return result
def create_all_state (increment , total_number , level , current_list , total_list , ) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
        current_list.pop()
def print_all_state (total_list ) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i )
if __name__ == "__main__":
lowerCamelCase : Tuple = 4
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = generate_all_combinations(n, k)
print_all_state(total_list)
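    # For n = 4 and k = 2 the run above prints every 2-combination of 1..4,
    # one per line: "1 2", "1 3", "1 4", "2 3", "2 4", "3 4".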
| 2 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 5_1_2,
"""roberta-large""": 5_1_2,
"""roberta-large-mnli""": 5_1_2,
"""distilroberta-base""": 5_1_2,
"""roberta-base-openai-detector""": 5_1_2,
"""roberta-large-openai-detector""": 5_1_2,
}
class a_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
__a: Optional[Any] = VOCAB_FILES_NAMES
__a: str = PRETRAINED_VOCAB_FILES_MAP
__a: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: Optional[Any] = ['''input_ids''', '''attention_mask''']
__a: Optional[Any] = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> Any:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def _lowercase ( self , value ) -> int:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _lowercase ( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _lowercase ( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def _lowercase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _lowercase ( self , token_ids_a , token_ids_b=None ) -> Any:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def _lowercase ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
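    # Schematic layout produced by the two methods above (with the standard RoBERTa
    # vocabulary, <s> = 0 and </s> = 2):
    #     single sequence: <s> A </s>             -> [0, *A, 2]
    #     sequence pair  : <s> A </s> </s> B </s> -> [0, *A, 2, 2, *B, 2]
    # The token-type method returns all zeros in both cases, since RoBERTa does not
    # use token type embeddings.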
| 14 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
return (new_height, new_width)
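# Worked example for the helper above (shapes chosen for illustration): an input of
# height 480 and width 640 resized toward output_size=384 with keep_aspect_ratio=True
# and multiple=32 gives scale_height = 0.8 and scale_width = 0.6; fitting the height
# reuses 0.8 for both sides, so the result is (384, 512) after snapping to multiples of 32.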
class a_ ( BaseImageProcessor ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def _lowercase ( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(
            image , output_size=(size['height'], size['width']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def _lowercase ( self , image , scale , data_format = None , **kwargs , ) -> Dict:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def _lowercase ( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _lowercase ( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def _lowercase ( self , outputs , target_sizes = None ) -> List[Any]:
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 14 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Any =logging.get_logger(__name__)
A__ : int ={
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class UpperCAmelCase ( PretrainedConfig ):
_lowercase: int = """transfo-xl"""
_lowercase: Optional[Any] = ["""mems"""]
_lowercase: Union[str, Any] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ) -> List[str]:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = []
        self.cutoffs.extend(cutoffs )
if proj_share_all_but_first:
_lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
_lowerCAmelCase = [False] + [False] * len(self.cutoffs )
_lowerCAmelCase = d_model
_lowerCAmelCase = d_embed
_lowerCAmelCase = d_head
_lowerCAmelCase = d_inner
_lowerCAmelCase = div_val
_lowerCAmelCase = pre_lnorm
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = mem_len
_lowerCAmelCase = same_length
_lowerCAmelCase = attn_type
_lowerCAmelCase = clamp_len
_lowerCAmelCase = sample_softmax
_lowerCAmelCase = adaptive
_lowerCAmelCase = dropout
_lowerCAmelCase = dropatt
_lowerCAmelCase = untie_r
_lowerCAmelCase = init
_lowerCAmelCase = init_range
_lowerCAmelCase = proj_init_std
_lowerCAmelCase = init_std
_lowerCAmelCase = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
def lowercase__ ( self : List[Any] ) -> List[str]:
# Message copied from Transformer-XL documentation
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def lowercase__ ( self : Tuple , __snake_case : Dict ) -> Any:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 70 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ) -> List[str]:
    '''simple docstring'''
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca ( fairseq_model , hf_model ) -> List[Any]:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> List[str]:
    '''simple docstring'''
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
        unused_weights.append(full_name )
def load_adapter ( full_name , value , adapter , unused_weights ) -> str:
    '''simple docstring'''
    name = full_name.split('adaptor.' )[-1]
    items = name.split('.' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
            adapter.proj_layer_norm.bias.data = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
            adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
            adapter.proj.bias.data = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
            adapter.proj.weight.data = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
    elif isinstance(layer_id , int ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
        unused_weights.append(full_name )
def make_linear_from_emb ( emb ) -> Any:
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
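# Quick sketch of the embedding-to-linear weight tying above (sizes illustrative):
#     emb = nn.Embedding(10, 4)
#     lm_head = make_linear_from_emb(emb)
#     lm_head.weight.shape  # torch.Size([10, 4]); the head reuses the embedding data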
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = WavaVecaConfig.from_pretrained(
UpperCamelCase_ , add_adapter=UpperCamelCase_ , adapter_stride=UpperCamelCase_ , adapter_kernel_size=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , output_hidden_size=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = MBartConfig.from_pretrained(UpperCamelCase_ )
# load model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
SCREAMING_SNAKE_CASE__ = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , use_auth_token=UpperCamelCase_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE__ = WavaVecaModel(UpperCamelCase_ )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
# load decoder weights
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
SCREAMING_SNAKE_CASE__ = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = MBartaaTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE__ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = 'mbart50'
SCREAMING_SNAKE_CASE__ = 'wav2vec2'
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = 250004
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=10_24, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_00_04, type=int, help="""`decoder_start_token_id` of model config""")
__snake_case = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 176 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase : Union[str, Any] = get_tests_dir('fixtures/dummy-config.json')
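# Tests covering AutoConfig loading, custom-config registration, and error messages.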
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = 0
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''fake-roberta''' )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertEqual(type(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoConfig.register('''model''' , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoConfig.register('''bert''' , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE )
A : Tuple = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
A : Any = AutoConfig.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
A : Dict = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
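# Configs that ship custom code can only be loaded with trust_remote_code=True.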
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE )
A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
class A ( __snake_case ):
__magic_name__ = '''new-model'''
try:
AutoConfig.register('''new-model''' , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
A : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
A : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
A : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 311 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase : Any = 'src/transformers'
lowercase : str = 'docs/source/en/tasks'
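# Return the text found between start_prompt and end_prompt in a file, plus its line span and all lines.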
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A : Union[str, Any] = f.readlines()
# Find the start prompt.
A : List[Any] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
A : List[str] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowercase : int = direct_transformers_import(TRANSFORMERS_PATH)
lowercase : str = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase : Optional[int] = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
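# Build the comma-separated markdown list of model links supported by a given task guide.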
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : int = TASK_GUIDE_TO_MODELS[task_guide]
A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
A : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A, A, A : Optional[int] = _find_text_in_file(
filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
A : Optional[int] = get_model_list_for_task(snake_case__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
''' to fix this.''' )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase : List[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 311 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
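# Numerically stable softmax over the last axis (subtract the row max before exponentiating).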
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = np.max(UpperCamelCase__ , axis=-1 , keepdims=UpperCamelCase__ )
A__ = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=UpperCamelCase__ )
class UpperCamelCase__( __A ):
def snake_case__ ( self ,**__UpperCAmelCase ) -> Dict:
A__ = {}
if "second_text" in kwargs:
A__ = kwargs['second_text']
return preprocess_kwargs, {}, {}
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Dict:
return self.tokenizer(__UpperCAmelCase ,text_pair=__UpperCAmelCase ,return_tensors=self.framework )
def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[int]:
return self.model(**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> Tuple:
A__ = model_outputs.logits[0].numpy()
A__ = softmax(__UpperCAmelCase )
A__ = np.argmax(__UpperCAmelCase )
A__ = self.model.config.idalabel[best_class]
A__ = probabilities[best_class].item()
A__ = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 221 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
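# Check that per-parameter gradients of two models agree (or deliberately disagree) at a given step.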
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
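# Run one forward/backward pass; either scale the loss and call backward() directly, or route through accelerator.backward().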
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True ):
"""simple docstring"""
model.train()
A__ = model(UpperCamelCase__ )
A__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
"""simple docstring"""
set_seed(42 )
A__ = RegressionModel()
A__ = deepcopy(UpperCamelCase__ )
A__ = RegressionDataset(length=80 )
A__ = DataLoader(UpperCamelCase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
A__ = AdamW(params=model.parameters() , lr=1E-3 )
A__ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
A__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 )
A__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 )
# Make a copy of `model`
if sched:
A__ , A__ , A__ , A__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A__ , A__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
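# no_sync is a no-op without DDP, so plain and prepared models must stay in lockstep.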
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ , A__ , A__ = get_training_setup(UpperCamelCase__ )
# Use a single batch
A__ , A__ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A__ , A__ = accelerator.gather((ddp_input, ddp_target) )
A__ , A__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
A__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
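# Under DDP, no_sync must suppress gradient synchronization until the next synced step.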
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ , A__ , A__ = get_training_setup(UpperCamelCase__ )
# Use a single batch
A__ , A__ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A__ , A__ = accelerator.gather((ddp_input, ddp_target) )
A__ , A__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
A__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
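# accelerator.accumulate should sync gradients only on every second step (or on the final batch).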
def UpperCAmelCase ( UpperCamelCase__=False , UpperCamelCase__=False ):
"""simple docstring"""
A__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A__ , A__ , A__ = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
A__ , A__ = batch.values()
# Gather the distributed inputs and targs for the base model
A__ , A__ = accelerator.gather((ddp_input, ddp_target) )
A__ , A__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
A__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
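# Same as above, but also steps optimizers and LR schedulers under gradient accumulation.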
def UpperCAmelCase ( UpperCamelCase__=False , UpperCamelCase__=False ):
"""simple docstring"""
A__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A__ , A__ , A__ , A__ , A__ , A__ , A__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
A__ , A__ = batch.values()
# Gather the distributed inputs and targs for the base model
A__ , A__ = accelerator.gather((ddp_input, ddp_target) )
A__ , A__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
A__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
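# GradientState must track the currently active dataloader, even when iteration is nested.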
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = Accelerator()
A__ = RegressionDataset(length=80 )
A__ = DataLoader(UpperCamelCase__ , batch_size=16 )
A__ = RegressionDataset(length=96 )
A__ = DataLoader(UpperCamelCase__ , batch_size=16 )
A__ , A__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = Accelerator()
A__ = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 221 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
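# ViT configuration; the defaults correspond to the google/vit-base-patch16-224 architecture.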
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = "vit"
def __init__( self : List[Any] , UpperCamelCase : Optional[int]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : List[str]=12 , UpperCamelCase : int=30_72 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : int=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : List[str]=1E-1_2 , UpperCamelCase : Any=2_24 , UpperCamelCase : Dict=16 , UpperCamelCase : Dict=3 , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=16 , **UpperCamelCase : List[Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : List[Any] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Dict = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Union[str, Any] = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : List[Any] = qkv_bias
lowerCAmelCase__ : Dict = encoder_stride
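# ONNX export settings: dynamic axes for pixel_values and the numerical validation tolerance.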
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Optional[Any] = version.parse("1.11" )
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowerCAmelCase ( self : List[Any] ) -> float:
"""simple docstring"""
return 1E-4
| 212 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
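# Shared padding/truncation test mixin; subclasses provide the feature extractor class and a tester.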
class _lowerCamelCase ( a_ ):
# to be overwritten in feature-extractor-specific tests
_lowerCamelCase :Optional[int] = None
_lowerCamelCase :List[Any] = None
@property
def _lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase , """feature_size""" ) )
self.assertTrue(hasattr(UpperCamelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(UpperCamelCase , """padding_value""" ) )
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCamelCase ) == len(UpperCamelCase ) for x, y in zip(UpperCamelCase , processed_features[input_name] ) ) )
lowerCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase )
lowerCAmelCase__ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
lowerCAmelCase__ : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase )
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
lowerCAmelCase__ : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase )
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Dict = feat_extract.model_input_names[0]
lowerCAmelCase__ : Tuple = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
lowerCAmelCase__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Union[str, Any]=False ) -> int:
"""simple docstring"""
def _inputs_have_equal_length(UpperCamelCase : int ):
lowerCAmelCase__ : Optional[Any] = len(input[0] )
for input_slice in input[1:]:
if len(UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase : int , UpperCamelCase : Dict ):
if len(UpperCamelCase ) != len(UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase , UpperCamelCase ):
if not np.allclose(np.asarray(UpperCamelCase ) , np.asarray(UpperCamelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase )
lowerCAmelCase__ : str = feat_extract.model_input_names[0]
lowerCAmelCase__ : str = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Any = self.feat_extract_tester.seq_length_diff
lowerCAmelCase__ : List[str] = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.min_seq_length
lowerCAmelCase__ : Optional[int] = self.feat_extract_tester.batch_size
lowerCAmelCase__ : str = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase__ : List[Any] = feat_extract.pad(UpperCamelCase , padding=UpperCamelCase )
lowerCAmelCase__ : int = input_a[input_name]
lowerCAmelCase__ : Tuple = feat_extract.pad(UpperCamelCase , padding="""longest""" )
lowerCAmelCase__ : Any = input_a[input_name]
lowerCAmelCase__ : List[str] = feat_extract.pad(UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase__ : Dict = input_a[input_name]
lowerCAmelCase__ : str = feat_extract.pad(UpperCamelCase , padding="""longest""" , return_tensors="""np""" )
lowerCAmelCase__ : Optional[Any] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="""max_length""" )[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=UpperCamelCase , return_tensors="""np""" )
lowerCAmelCase__ : int = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase , UpperCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ : Any = feat_extract.pad(UpperCamelCase , pad_to_multiple_of=10 )
lowerCAmelCase__ : int = input_a[input_name]
lowerCAmelCase__ : Dict = feat_extract.pad(UpperCamelCase , padding="""longest""" , pad_to_multiple_of=10 )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=UpperCamelCase )
lowerCAmelCase__ : str = input_a[input_name]
lowerCAmelCase__ : Union[str, Any] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=UpperCamelCase , return_tensors="""np""" , )
lowerCAmelCase__ : List[Any] = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : Optional[int] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(UpperCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase__ : str = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : int=False ) -> List[str]:
"""simple docstring"""
def _inputs_have_equal_length(UpperCamelCase : List[Any] ):
lowerCAmelCase__ : List[str] = len(input[0] )
for input_slice in input[1:]:
if len(UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase : Union[str, Any] , UpperCamelCase : str ):
if len(UpperCamelCase ) != len(UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase , UpperCamelCase ):
if not np.allclose(np.asarray(UpperCamelCase ) , np.asarray(UpperCamelCase ) , atol=1E-3 ):
return False
return True
lowerCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase )
lowerCAmelCase__ : str = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase__ : str = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=UpperCamelCase )
lowerCAmelCase__ : Dict = input_a[input_name]
lowerCAmelCase__ : str = feat_extract.pad(UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
lowerCAmelCase__ : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
# truncate to smallest with np
lowerCAmelCase__ : Tuple = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=UpperCamelCase , )
lowerCAmelCase__ : List[Any] = input_a[input_name]
lowerCAmelCase__ : List[Any] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
lowerCAmelCase__ : Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
# truncate to middle
lowerCAmelCase__ : List[Any] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase , return_tensors="""np""" , )
lowerCAmelCase__ : List[Any] = input_a[input_name]
lowerCAmelCase__ : List[Any] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase )
lowerCAmelCase__ : List[str] = input_a[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
lowerCAmelCase__ : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase , UpperCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , truncation=UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="""longest""" , truncation=UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="""longest""" , truncation=UpperCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase ):
feat_extract.pad(UpperCamelCase , padding="""max_length""" , truncation=UpperCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : Optional[int] = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase , truncation=UpperCamelCase , )
lowerCAmelCase__ : List[Any] = input_a[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase , )
lowerCAmelCase__ : Optional[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase__ : Union[str, Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase__ : Any = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(UpperCamelCase ) )
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
self._check_padding(numpify=UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
self._check_padding(numpify=UpperCamelCase )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self._check_truncation(numpify=UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._check_truncation(numpify=UpperCamelCase )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : str = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : str = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Dict = feat_extract.pad(UpperCamelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
lowerCAmelCase__ : str = feat_extract.pad(UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : List[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : int = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : int = feat_extract.pad(UpperCamelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
lowerCAmelCase__ : Any = feat_extract.pad(UpperCamelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Any = self.feat_extract_dict
lowerCAmelCase__ : int = True
lowerCAmelCase__ : str = self.feature_extraction_class(**UpperCamelCase )
lowerCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : List[Any] = [len(UpperCamelCase ) for x in speech_inputs]
lowerCAmelCase__ : Optional[Any] = feat_extract.model_input_names[0]
lowerCAmelCase__ : List[str] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Tuple = feat_extract.pad(UpperCamelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.feat_extract_dict
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : str = self.feature_extraction_class(**UpperCamelCase )
lowerCAmelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ : Any = [len(UpperCamelCase ) for x in speech_inputs]
lowerCAmelCase__ : int = feat_extract.model_input_names[0]
lowerCAmelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ : Any = min(UpperCamelCase )
lowerCAmelCase__ : Tuple = feat_extract.pad(
UpperCamelCase , padding="""max_length""" , max_length=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 212 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
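# Lazily expose the MVP classes so heavy optional dependencies are only imported on first use.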
__magic_name__ = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
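# Thin wrapper around torch.onnx.export that keeps compatibility with keyword arguments removed in torch 1.11.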
def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str:
output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , )
else:
export(
snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , )
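# Export only the VAE decoder of a diffusers checkpoint to ONNX, optionally in float16 on CUDA.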
@torch.no_grad()
def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]:
__UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCamelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__UpperCamelCase = 'cpu'
__UpperCamelCase = Path(snake_case )
# VAE DECODER
__UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
__UpperCamelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCamelCase = vae_decoder.decode
onnx_export(
snake_case , model_args=(
torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=snake_case , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase : List[Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 316 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir('''fixtures''')
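# Verify graceful handling of transient hub errors when loading feature extractors.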
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: str = mock.Mock()
A: Tuple = 5_00
A: Optional[Any] = {}
A: Tuple = HTTPError
A: int = {}
# Download this model to make sure it's in the cache.
A: Dict = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE_ ) as mock_head:
A: List[Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A: Dict = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _snake_case ( cls : List[str] ) -> Any:
'''simple docstring'''
A: Any = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def _snake_case ( cls : List[str] ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def _snake_case ( self : Any ) -> Any:
'''simple docstring'''
A: Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
A: Tuple = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''test-feature-extractor''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
A: int = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A: Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
A: Tuple = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
A: Tuple = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
A: Optional[int] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
A: Tuple = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=SCREAMING_SNAKE_CASE_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 358 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
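# Raised while iterating a singly linked list that contains a cycle.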
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
pass
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> None:
'''simple docstring'''
A: Any = data
A: Node | None = None
def __iter__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
A: List[str] = self
A: Dict = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(SCREAMING_SNAKE_CASE_ )
yield node.data
A: str = node.next_node
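# A list has a loop if iterating it to exhaustion raises ContainsLoopError.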
@property
def _snake_case ( self : List[str] ) -> bool:
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
UpperCamelCase = Node(1)
UpperCamelCase = Node(2)
UpperCamelCase = Node(3)
UpperCamelCase = Node(4)
print(root_node.has_loop) # False
UpperCamelCase = root_node.next_node
print(root_node.has_loop) # True
UpperCamelCase = Node(5)
UpperCamelCase = Node(6)
UpperCamelCase = Node(5)
UpperCamelCase = Node(6)
print(root_node.has_loop) # False
UpperCamelCase = Node(1)
print(root_node.has_loop) # False
| 334 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model: it needs both MLM and sentence-order labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 33 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__A : List[str] = '''examples/'''
__A : int = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__A : Dict = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__A : Optional[int] = '''README.md'''
def lowercase ( __snake_case : int , __snake_case : Any , __snake_case : int ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : int = f.read()
lowercase_ , lowercase_ : List[str] = REPLACE_PATTERNS[pattern]
lowercase_ : Union[str, Any] = replace.replace('''VERSION''' , __snake_case )
lowercase_ : Optional[Any] = re_pattern.sub(__snake_case , __snake_case )
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__snake_case )
def lowercase ( __snake_case : int ):
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern='''examples''' )
def lowercase ( __snake_case : Optional[Any] , __snake_case : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case , __snake_case , __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def lowercase ( ):
lowercase_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
lowercase_ : Union[str, Any] = '''1. Want to contribute a new model?'''
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : List[str] = f.readlines()
# Find the start of the list.
lowercase_ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowercase_ : str = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
def lowercase ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowercase_ : List[Any] = f.read()
lowercase_ : List[str] = REPLACE_PATTERNS['''init'''][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def lowercase ( __snake_case : Optional[Any]=False ):
lowercase_ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowercase_ : Optional[Any] = default_version.base_version
elif patch:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowercase_ : int = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : Dict = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case , patch=__snake_case )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowercase ( ):
lowercase_ : List[Any] = get_version()
lowercase_ : List[str] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowercase_ : Any = current_version.base_version
# Check with the user we got that right.
lowercase_ : Tuple = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : str = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__A : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
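
# Editor's note: a minimal doctest-style illustration (not part of the original
# script) of how the "init" entry of REPLACE_PATTERNS rewrites a version line;
# the sample version strings are hypothetical.
#
#     >>> import re
#     >>> pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
#     >>> pattern.sub('__version__ = "4.30.0"', '__version__ = "4.29.0.dev0"')
#     '__version__ = "4.30.0"'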
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 |
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
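
# Editor's note: a short usage sketch (not part of the original module); the
# arrays are made-up values, numpy and scikit-learn must be installed, and the
# import path assumes this file lives at transformers/data/metrics/__init__.py:
#
#     >>> import numpy as np
#     >>> from transformers.data.metrics import glue_compute_metrics
#     >>> preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#     >>> glue_compute_metrics("mrpc", preds, labels)  # acc 0.75, f1 0.8, mean 0.775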
| 22 | 0 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class a ( unittest.TestCase ):
def UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowerCamelCase_ = ['a', 'b', 'c']
# Defaults to last layer if both are None
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , ['c'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , [2] )
# Out indices set to match out features
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(['a', 'c'] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , ['a', 'c'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , [0, 2] )
# Out features set to match out indices
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , [0, 2] , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , ['a', 'c'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , [0, 2] )
# Out features selected from negative indices
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , [-3, -1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , ['a', 'c'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , [-3, -1] )
def UpperCamelCase ( self : Dict ) -> Optional[Any]:
# Stage names must be set
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , __SCREAMING_SNAKE_CASE )
# Out features must be a list
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(__SCREAMING_SNAKE_CASE , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(__SCREAMING_SNAKE_CASE , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def UpperCamelCase ( self : Any ) -> Optional[int]:
lowerCamelCase_ = BackboneMixin()
lowerCamelCase_ = ['a', 'b', 'c']
lowerCamelCase_ = ['a', 'c']
lowerCamelCase_ = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCamelCase_ = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCamelCase_ = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 183 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_SCREAMING_SNAKE_CASE : Any = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCamelCase__ ( _lowerCamelCase : str ) -> str:
if "://" in dataset_path:
lowerCamelCase_ = dataset_path.split('://' )[1]
return dataset_path
def lowerCamelCase__ ( _lowerCamelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowerCamelCase__ ( _lowerCamelCase : fsspec.AbstractFileSystem , _lowerCamelCase : str , _lowerCamelCase : str ) -> int:
lowerCamelCase_ = not is_remote_filesystem(_lowerCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_lowerCamelCase ) , fs._strip_protocol(_lowerCamelCase ) )
else:
fs.mv(_lowerCamelCase , _lowerCamelCase , recursive=_lowerCamelCase )
def lowerCamelCase__ ( ) -> None:
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = threading.Lock()
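
# Editor's note: a brief usage sketch for the two pure helpers above (not part
# of the original module); the paths are made up and the import path assumes
# this file is `datasets/filesystems/__init__.py`:
#
#     >>> from datasets.filesystems import extract_path_from_uri, is_remote_filesystem
#     >>> extract_path_from_uri("s3://my-bucket/datasets/train")
#     'my-bucket/datasets/train'
#     >>> import fsspec
#     >>> is_remote_filesystem(fsspec.filesystem("file"))
#     False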
| 183 | 1 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding token ids:
        - **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids into readable text:
        - **tokens_ids**: List of token ids
        - **skip_special_tokens**: Flag indicating to not try to decode special tokens
        - **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        **inputs**: Pipeline input to run through the model.
        """

        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)

        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
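
# Editor's note: a hypothetical invocation sketch (not part of the original
# module). With the optional [serving] dependencies installed, the command
# registered above is typically launched and queried along these lines:
#
#     transformers-cli serve --task fill-mask --model bert-base-uncased --port 8888
#     curl -X POST http://localhost:8888/tokenize \
#          -H "Content-Type: application/json" \
#          -d '{"text_input": "Hello world", "return_ids": true}'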
| 51 |
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)

        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
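
# Editor's note: a hypothetical usage sketch (not part of the original file);
# the checkpoint ids and the `custom_pipeline` spelling are assumptions.
#
#     from diffusers import DiffusionPipeline
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#         speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#     )
#     image = pipe(audio=waveform, sampling_rate=16_000).images[0]  # `waveform` is a 16 kHz audio array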
| 51 | 1 |
"""simple docstring"""
def lowercase (snake_case__ : int = 600_851_475_143 ) -> int:
'''simple docstring'''
try:
lowerCAmelCase = int(snake_case__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
lowerCAmelCase = 2
lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowerCAmelCase = i
while n % i == 0:
lowerCAmelCase = n // i
i += 1
return int(snake_case__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 155 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def __lowercase ( self : Tuple ):
super().setUp()
# fmt: off
lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase ) )
def __lowercase ( self : Optional[Any] , **lowerCAmelCase : str ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def __lowercase ( self : Any , **lowerCAmelCase : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Dict ):
lowerCAmelCase = """lower newer"""
lowerCAmelCase = """lower newer"""
return input_text, output_text
def __lowercase ( self : int ):
lowerCAmelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = tokens + [tokenizer.unk_token]
lowerCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
@require_ftfy
def __lowercase ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase = """xa\u0303y""" + """ """ + """x\xe3y"""
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase = f'''{text_of_1_token} {text_of_1_token}'''
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , )
lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
lowerCAmelCase = f''' {text}'''
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , )
lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
def __lowercase ( self : Dict ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __lowercase ( self : Optional[int] ):
super().test_tokenization_python_rust_equals()
def __lowercase ( self : Optional[int] ):
# CLIP always lower cases letters
pass
| 155 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 338 | 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
lowerCAmelCase__ :Dict = str(bin(_SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
lowerCAmelCase__ :Dict = str(bin(_SCREAMING_SNAKE_CASE ) )[2:]
lowerCAmelCase__ :Union[str, Any] = max(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int('1' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_SCREAMING_SNAKE_CASE ) , b_binary.zfill(_SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 293 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__A = Lock()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase__ :Any = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase__ :Optional[int] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_SCREAMING_SNAKE_CASE )
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` with one process per element, exchanging values over pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main() -> None:
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
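
# For reference, a minimal single-process odd-even transposition sort (a sketch;
# the parallel version above performs the same compare-exchange phases, but
# exchanges values between neighboring processes over pipes instead):
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr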
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when the caller did not pass one
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
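
# A minimal sketch of how a DEIS scheduler is typically driven outside this test
# harness (shown as comments because `model` is a placeholder, not defined here):
#
#     scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         noise_pred = model(sample, t)  # hypothetical denoising model
#         sample = scheduler.step(noise_pred, t, sample).prev_sample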
"""A small fully connected neural network with two hidden layers (NumPy only)."""

import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradients of the squared error, propagated backwards layer by layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values (a truth table of three binary inputs).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
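
# Quick numerical sanity check (a sketch): for the logistic sigmoid s(x), the
# derivative satisfies s'(x) = s(x) * (1 - s(x)), which is what
# sigmoid_derivative computes when handed an already-activated value.
if __name__ == "__main__":
    xs = numpy.linspace(-3, 3, 7)
    activated = sigmoid(xs)
    numeric = (sigmoid(xs + 1e-6) - sigmoid(xs - 1e-6)) / 2e-6
    assert numpy.allclose(sigmoid_derivative(activated), numeric, atol=1e-5)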
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# A crop_pct-style image processor (this follows the PoolFormer preprocessing recipe).
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # scale the target size up by 1/crop_pct, so the later center crop recovers `size`
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
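
# A minimal usage sketch (shown as comments; the image path is a placeholder):
#
#     from PIL import Image
#     processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
#     inputs = processor(images=Image.open("cat.png").convert("RGB"), return_tensors="np")
#     print(inputs["pixel_values"].shape)  # e.g. (1, 3, 224, 224)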
"""simple docstring"""
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
a =set()
# Replace all the whitespace in our sentence
a =input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowercase ) == 26
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
a =[False] * 26
for char in input_str:
if char.islower():
a =True
elif char.isupper():
a =True
return all(lowercase )
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _A ( ):
"""simple docstring"""
from timeit import timeit
a ='''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=lowercase ) )
print(timeit('''is_pangram_faster()''' , setup=lowercase ) )
print(timeit('''is_pangram_fastest()''' , setup=lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 81 | 0 |
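
# A short self-check (a sketch) exercising all three implementations on a known
# pangram and a known non-pangram:
if __name__ == "__main__":
    for checker in (is_pangram, is_pangram_faster, is_pangram_fastest):
        assert checker("The quick brown fox jumps over the lazy dog")
        assert not checker("hello world")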
from __future__ import annotations


class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    # Build a small, irregular test tree out of the nine nodes.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
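
# A tiny sketch: a root with both children present is a full binary tree of depth 2.
if __name__ == "__main__":
    small = Node(1)
    small.left, small.right = Node(2), Node(3)
    assert is_full_binary_tree(small)
    assert depth_of_tree(small) == 2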
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[Any] = ["DPTFeatureExtractor"]
lowercase__ :List[Any] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
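
# How the lazy pattern behaves (a sketch): importing the package itself stays
# cheap, and a heavy submodule is only imported on first attribute access, e.g.
#
#     from transformers.models.dpt import DPTImageProcessor  # triggers the real import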
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a distribution: H(p) = -sum_i p_i * log(p_i)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor with one row per layer and one column per head."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    (importance per Michel et al., 2019, "Are Sixteen Heads Really Better than One?")."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the
    score drops below args.masking_threshold times the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and measure score and speedup."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
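
# Typical invocation (a sketch; the script filename and data path are placeholder
# assumptions -- the data file is expected to contain token ids loadable by
# np.loadtxt):
#
#     python run_prune_gpt2.py \
#         --model_name_or_path gpt2 \
#         --data_dir ./token_ids.txt \
#         --output_dir ./pruned_gpt2 \
#         --try_masking --masking_threshold 0.9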
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
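
# A short usage sketch: build a scaled-down config and note that the mapped
# attribute `num_attention_heads` resolves to `num_encoder_attention_heads`:
#
#     config = XLMProphetNetConfig(num_encoder_layers=2, num_decoder_layers=2)
#     assert config.num_attention_heads == config.num_encoder_attention_heads
#     assert config.num_hidden_layers == 4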
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
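
# Usage sketch (downloads the hub tokenizer files on first call):
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     enc = tok("Hello world", return_offsets_mapping=True)
#     print(enc["input_ids"], enc["offset_mapping"])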
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # drop the trailing "weight" to find the matching scores
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # stretch the sigmoid to (l, r) and clamp to [0, 1] (hard-concrete style)
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
        help="""Folder in which to save the pruned model; defaults to a sibling folder named bertarized_<model_name>""",
)
    args = parser.parse_args()
main(args)
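# A minimal sketch of what a TopK-style binarizer computes, for readers without the
# `emmental` package used above; the helper name and shapes here are assumptions,
# not part of the original script.
#
# import torch
#
# def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
#     # Keep the top `threshold` fraction of scores, zero out the rest.
#     k = max(1, int(threshold * scores.numel()))
#     kth_largest = scores.flatten().kthvalue(scores.numel() - k + 1).values
#     return (scores >= kth_largest).float()
#
# weights = torch.randn(4, 4)
# pruned = weights * topk_mask(weights.abs(), threshold=0.25)  # ~25% of weights survive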
| 14 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
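# Sharding intuition for shard_data_sources(worker_id, num_workers) as exercised
# above: with 4 partitions and 2 workers, worker 0 reads partitions [0, 2] and
# worker 1 reads [1, 3]; that is, partition p is assigned to worker p % num_workers.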
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
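# Effect of the lazy module above: importing the package stays cheap, and the
# heavy torch/TF/Flax submodules are only imported on first attribute access,
# e.g. when VisionEncoderDecoderModel is actually looked up on the module.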
| 248 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = 0
def A_ ( self ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = AutoConfig.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = AutoConfig.for_model("roberta" )
self.assertIsInstance(snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase : Tuple = os.path.join(snake_case , "fake-roberta" )
os.makedirs(snake_case , exist_ok=snake_case )
with open(os.path.join(snake_case , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(snake_case )
self.assertEqual(type(snake_case ) , snake_case )
def A_ ( self ):
'''simple docstring'''
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : int = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case )
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def A_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase : str = AutoConfig.from_pretrained("bert-base" )
def A_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case , revision="aaaaaa" )
def A_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
UpperCAmelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def A_ ( self ):
'''simple docstring'''
with self.assertRaises(snake_case ):
UpperCAmelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case ):
UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case )
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case )
UpperCAmelCase : Tuple = AutoConfig.from_pretrained(snake_case , trust_remote_code=snake_case )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def A_ ( self ):
'''simple docstring'''
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = "new-model"
try:
AutoConfig.register("new-model" , snake_case )
# If remote code is not set, the default is to use local
UpperCAmelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
UpperCAmelCase : int = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
UpperCAmelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
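# Minimal round trip for the registration API exercised above (the directory path
# and MyConfig are placeholders; MyConfig must subclass PretrainedConfig and set
# model_type = "my-model"):
# AutoConfig.register("my-model", MyConfig)
# MyConfig().save_pretrained("/tmp/my-model")
# config = AutoConfig.from_pretrained("/tmp/my-model")  # -> a MyConfig instance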
| 311 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    '''simple docstring'''
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    '''simple docstring'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    '''simple docstring'''
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
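# Quick cross-check for the implementation above (assuming md5_me is called on raw
# bytes and returns the 32-character hex digest as bytes): hashlib gives the
# reference value, e.g.
# hashlib.md5(b"hello").hexdigest() == "5d41402abc4b2a76b9719d911017c592"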
| 311 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ["input_features", "is_longer"]
def __init__(self ,_lowerCamelCase=64 ,_lowerCamelCase=48000 ,_lowerCamelCase=480 ,_lowerCamelCase=10 ,_lowerCamelCase=1024 ,_lowerCamelCase=0.0 ,_lowerCamelCase=False ,_lowerCamelCase = 0 ,_lowerCamelCase = 14000 ,_lowerCamelCase = None ,_lowerCamelCase = "fusion" ,_lowerCamelCase = "repeatpad" ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
super().__init__(
feature_size=__lowercase ,sampling_rate=__lowercase ,padding_value=__lowercase ,return_attention_mask=__lowercase ,**__lowercase ,)
__lowercase = top_db
__lowercase = truncation
__lowercase = padding
__lowercase = fft_window_size
__lowercase = (fft_window_size >> 1) + 1
__lowercase = hop_length
__lowercase = max_length_s
__lowercase = max_length_s * sampling_rate
__lowercase = sampling_rate
__lowercase = frequency_min
__lowercase = frequency_max
__lowercase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__lowercase ,min_frequency=__lowercase ,max_frequency=__lowercase ,sampling_rate=__lowercase ,norm=__lowercase ,mel_scale='''htk''' ,)
__lowercase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__lowercase ,min_frequency=__lowercase ,max_frequency=__lowercase ,sampling_rate=__lowercase ,norm='''slaney''' ,mel_scale='''slaney''' ,)
def _UpperCAmelCase (self ) -> Dict[str, Any]:
'''simple docstring'''
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase = None ) -> np.ndarray:
'''simple docstring'''
__lowercase = spectrogram(
__lowercase ,window_function(self.fft_window_size ,'''hann''' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=__lowercase ,log_mel='''dB''' ,)
return log_mel_spectrogram.T
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__lowercase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__lowercase = [0]
# randomly choose index for each part
__lowercase = np.random.choice(ranges[0] )
__lowercase = np.random.choice(ranges[1] )
__lowercase = np.random.choice(ranges[2] )
__lowercase = mel[idx_front : idx_front + chunk_frames, :]
__lowercase = mel[idx_middle : idx_middle + chunk_frames, :]
__lowercase = mel[idx_back : idx_back + chunk_frames, :]
__lowercase = torch.tensor(mel[None, None, :] )
__lowercase = torch.nn.functional.interpolate(
__lowercase ,size=[chunk_frames, 64] ,mode='''bilinear''' ,align_corners=__lowercase )
__lowercase = mel_shrink[0][0].numpy()
__lowercase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__lowercase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__lowercase = len(__lowercase ) - max_length
__lowercase = np.random.randint(0 ,overflow + 1 )
__lowercase = waveform[idx : idx + max_length]
__lowercase = self._np_extract_fbank_features(__lowercase ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__lowercase = self._np_extract_fbank_features(__lowercase ,self.mel_filters )
__lowercase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__lowercase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__lowercase = np.stack([mel, mel, mel, mel] ,axis=0 )
__lowercase = False
else:
__lowercase = self._random_mel_fusion(__lowercase ,__lowercase ,__lowercase )
__lowercase = True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented" )
else:
__lowercase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__lowercase = int(max_length / len(__lowercase ) )
__lowercase = np.stack(np.tile(__lowercase ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__lowercase = int(max_length / len(__lowercase ) )
__lowercase = np.stack(np.tile(__lowercase ,__lowercase ) )
__lowercase = np.pad(__lowercase ,(0, max_length - waveform.shape[0]) ,mode='''constant''' ,constant_values=0 )
if truncation == "fusion":
__lowercase = self._np_extract_fbank_features(__lowercase ,self.mel_filters )
__lowercase = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
__lowercase = self._np_extract_fbank_features(__lowercase ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__(self ,_lowerCamelCase ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,_lowerCamelCase = None ,**_lowerCamelCase ,) -> BatchFeature:
'''simple docstring'''
__lowercase = truncation if truncation is not None else self.truncation
__lowercase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__lowercase = isinstance(__lowercase ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
__lowercase = is_batched_numpy or (
isinstance(__lowercase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase = [np.asarray(__lowercase ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowercase ,np.ndarray ):
__lowercase = np.asarray(__lowercase ,dtype=np.floataa )
elif isinstance(__lowercase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase = [np.asarray(__lowercase )]
# convert to mel spectrogram, truncate and pad if needed.
__lowercase = [
self._get_input_mel(__lowercase ,max_length if max_length else self.nb_max_samples ,__lowercase ,__lowercase )
for waveform in raw_speech
]
__lowercase = []
__lowercase = []
for mel, longer in padded_inputs:
input_mel.append(__lowercase )
is_longer.append(__lowercase )
if truncation == "fusion" and sum(__lowercase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__lowercase = np.random.randint(0 ,len(__lowercase ) )
__lowercase = True
if isinstance(input_mel[0] ,__lowercase ):
__lowercase = [np.asarray(__lowercase ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__lowercase = [[longer] for longer in is_longer]
__lowercase = {'''input_features''': input_mel, '''is_longer''': is_longer}
__lowercase = BatchFeature(__lowercase )
if return_tensors is not None:
__lowercase = input_features.convert_to_tensors(__lowercase )
return input_features
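# Usage sketch (assuming the class above is exposed as ClapFeatureExtractor): feed
# mono audio sampled at 48 kHz and read back fused log-mel features plus the
# is_longer flag; the exact output shape is an assumption.
# import numpy as np
# fe = ClapFeatureExtractor()
# out = fe(np.zeros(48_000, dtype=np.float32), sampling_rate=48_000, return_tensors="np")
# out["input_features"].shape  # roughly (1, 4, frames, 64) under the "fusion" truncation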
| 367 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    '''simple docstring'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
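# Sanity check: 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29; the default
# input reproduces the Project Euler #3 answer, 6857.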
| 217 | 0 |
from __future__ import annotations
import math
lowerCamelCase__ = """2020.9.26"""
lowerCamelCase__ = """xcodz-dot, cclaus, dhruvmanila"""
def convert_to_2d(
    x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(
    x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F"""{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 212 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 212 | 1 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
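# Usage sketch (assumes the base HashTable from .hash_table exposes insert_data and
# a (size_table, charge_factor) constructor):
# ht = HashTableWithLinkedList(3, charge_factor=2)
# for value in (10, 20, 30):
#     ht.insert_data(value)  # colliding keys accumulate in a deque per slot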
| 367 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
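# Worked example: for [[2, 5], [1, 3]] the determinant is 2*3 - 5*1 = 1, so
# inverse_of_matrix([[2, 5], [1, 3]]) returns [[3.0, -5.0], [-1.0, 2.0]].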
| 97 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    '''simple docstring'''

    def test_accelerated_optimizer_pickling(self) -> None:
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 37 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 0 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 366 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : Union[str, Any] =logging.get_logger(__name__)
_A : List[Any] ={
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self: Optional[int] , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=None , UpperCamelCase__: List[str]=3 , UpperCamelCase__: Any=300 , UpperCamelCase__: Optional[int]=1_024 , UpperCamelCase__: int=6 , UpperCamelCase__: str=1_024 , UpperCamelCase__: Optional[Any]=8 , UpperCamelCase__: Optional[Any]=6 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=8 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any="relu" , UpperCamelCase__: Any=256 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Optional[int]=0.0 , UpperCamelCase__: Any=0.0 , UpperCamelCase__: List[str]=0.02 , UpperCamelCase__: str=1.0 , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: str="sine" , UpperCamelCase__: Optional[Any]="resnet50" , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: Any=4 , UpperCamelCase__: int=4 , UpperCamelCase__: int=False , UpperCamelCase__: Optional[Any]=300 , UpperCamelCase__: str=False , UpperCamelCase__: int=1 , UpperCamelCase__: Tuple=5 , UpperCamelCase__: List[Any]=2 , UpperCamelCase__: Optional[int]=1 , UpperCamelCase__: int=1 , UpperCamelCase__: Tuple=5 , UpperCamelCase__: str=2 , UpperCamelCase__: str=0.1 , UpperCamelCase__: List[str]=0.25 , UpperCamelCase__: Any=False , **UpperCamelCase__: Optional[Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase__ : Union[str, Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : Tuple = backbone_config.get("""model_type""" )
lowerCamelCase__ : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Optional[Any] = config_class.from_dict(UpperCamelCase__ )
lowerCamelCase__ : Tuple = use_timm_backbone
lowerCamelCase__ : Tuple = backbone_config
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = num_queries
lowerCamelCase__ : int = max_position_embeddings
lowerCamelCase__ : str = d_model
lowerCamelCase__ : Dict = encoder_ffn_dim
lowerCamelCase__ : Union[str, Any] = encoder_layers
lowerCamelCase__ : int = encoder_attention_heads
lowerCamelCase__ : str = decoder_ffn_dim
lowerCamelCase__ : Optional[Any] = decoder_layers
lowerCamelCase__ : Optional[int] = decoder_attention_heads
lowerCamelCase__ : int = dropout
lowerCamelCase__ : Optional[Any] = attention_dropout
lowerCamelCase__ : List[Any] = activation_dropout
lowerCamelCase__ : List[Any] = activation_function
lowerCamelCase__ : int = init_std
lowerCamelCase__ : Dict = init_xavier_std
lowerCamelCase__ : Tuple = encoder_layerdrop
lowerCamelCase__ : str = auxiliary_loss
lowerCamelCase__ : int = position_embedding_type
lowerCamelCase__ : Tuple = backbone
lowerCamelCase__ : Tuple = use_pretrained_backbone
lowerCamelCase__ : Optional[int] = dilation
# deformable attributes
lowerCamelCase__ : Optional[int] = num_feature_levels
lowerCamelCase__ : Tuple = encoder_n_points
lowerCamelCase__ : Tuple = decoder_n_points
lowerCamelCase__ : Dict = two_stage
lowerCamelCase__ : int = two_stage_num_proposals
lowerCamelCase__ : Any = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowerCamelCase__ : Optional[int] = class_cost
lowerCamelCase__ : List[str] = bbox_cost
lowerCamelCase__ : Any = giou_cost
# Loss coefficients
lowerCamelCase__ : Union[str, Any] = mask_loss_coefficient
lowerCamelCase__ : Tuple = dice_loss_coefficient
lowerCamelCase__ : int = bbox_loss_coefficient
lowerCamelCase__ : Optional[Any] = giou_loss_coefficient
lowerCamelCase__ : Dict = eos_coefficient
lowerCamelCase__ : Any = focal_alpha
lowerCamelCase__ : Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
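# Usage sketch (standard Transformers API for this config class):
# from transformers import DeformableDetrConfig, DeformableDetrModel
# config = DeformableDetrConfig(num_queries=100, two_stage=False)
# model = DeformableDetrModel(config)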
| 129 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
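# The loop above is the polyline approximation of arc length,
# L ≈ sum(sqrt(dx**2 + dy**2)) over `steps` uniform subintervals; for f(x) = x on
# [0, 1] it returns sqrt(2) for any number of steps.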
| 24 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    '''simple docstring'''
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
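# Example invocation (all paths are placeholders; the script derives the vocab path
# as tf_checkpoint_path[:-10] + "vocab.txt", so the checkpoint file name is expected
# to end in "model.ckpt"):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#   --task WTQ \
#   --reset_position_index_per_cell \
#   --tf_checkpoint_path tapas_model.ckpt \
#   --tapas_config_file tapas_config.json \
#   --pytorch_dump_path ./tapas_wtq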
| 22 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
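# Usage sketch (runs within the Transformers tools runtime and downloads models on
# first use):
# reader = TextToSpeechTool()
# reader.setup()
# waveform = reader("Hello world")  # PipelineTool.__call__ chains encode/forward/decode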
| 97 |
import numpy as np
import datasets
lowercase__ :Dict = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
lowercase__ :List[Any] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
lowercase__ :Dict = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
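# Worked check mirroring the docstring example: with reference [[0, 1], [1, 0]] and
# X = [[0, 1]], the overall mean is 0.5, the covariance of the transposed reference
# is [[0.5, -0.5], [-0.5, 0.5]] (singular, hence the pinv fallback), and the
# resulting distance is 0.5.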
| 97 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """simple docstring"""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        """simple docstring"""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        """simple docstring"""
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        """simple docstring"""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
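# Editor's note: the padding step above, replayed standalone on two tensors of
# unequal spatial size so the right/bottom zero-padding is easy to see.
import torch
from torch import nn

images = [torch.ones(3, 500, 700), torch.ones(3, 600, 400)]
max_size = tuple(max(s) for s in zip(*[im.shape for im in images]))  # (3, 600, 700)
padded = [
    nn.functional.pad(im, [0, max_size[-1] - im.shape[-1], 0, max_size[-2] - im.shape[-2]], value=0)
    for im in images
]
print(torch.stack(padded).shape)  # torch.Size([2, 3, 600, 700])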
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
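# Editor's note: quick check of the in-place clipping above against a 480x640
# image; `_clip_box` is the name reconstructed for the function just defined.
import torch

boxes = torch.tensor([[-5.0, 10.0, 700.0, 500.0]])  # (x0, y0, x1, y1)
_clip_box(boxes, (480, 640))  # box_size is (height, width)
print(boxes)  # tensor([[  0.,  10., 640., 480.]])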
| 51 |
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
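# Editor's note: round-trip sanity check for the bit codec above; encoding
# quantizes to 1/255 steps, so we compare against the quantized input.
import torch

img = torch.rand(1, 3, 8, 8)       # values in [0, 1]
bits = decimal_to_bits(img)        # (1, 24, 8, 8), values in {-1, 1}
recovered = bits_to_decimal(bits)  # back to [0, 1]
assert torch.allclose(recovered, (img * 255).int() / 255, atol=1e-6)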
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the matching custom step as a method on the scheduler so that, inside the
        # step functions above, `self` is the scheduler instance; also give the scheduler
        # the `bit_scale` attribute those functions read.
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = step_fn.__get__(scheduler)
        scheduler.bit_scale = bit_scale

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            # numpy_to_pil expects an NHWC numpy array
            image = image.cpu().permute(0, 2, 3, 1).numpy()
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
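# Editor's note: hypothetical wiring of the pipeline above. The type hint says
# UNet2DConditionModel, but the call `self.unet(latents, t)` matches an
# unconditional UNet2DModel, which is what this sketch assumes; all sizes are
# placeholders, not values prescribed by this file.
import torch
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=64, in_channels=24, out_channels=24)  # 3 RGB channels x 8 bits
scheduler = DDIMScheduler(num_train_timesteps=1000, clip_sample=True)
pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
images = pipe(height=64, width=64, num_inference_steps=10, generator=torch.manual_seed(0)).images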
| 51 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 351 |
"""simple docstring"""
import argparse
import os
import re
lowercase__ = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowercase__ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Splits `code` into blocks at the given `indent_level`, optionally bounded by `start_prompt`/`end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` (that maps an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sorts a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
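# Editor's note: small demonstration of the ordering rule implemented above:
# constants first, then classes, then functions, each bucket sorted
# case-insensitively with underscores ignored. The names are made up.
names = ["load_model", "BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "_helper", "AutoConfig"]
print(sort_objects(names))
# ['BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoConfig', 'BertModel', '_helper', 'load_model']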
def sort_objects_in_import(import_statement: str) -> str:
    """Sorts the imports in a single import statement."""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sorts the imports defined in the `_import_structure` of a given init file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sorts the imports defined in the `_import_structure` of all inits in the repo."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 161 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 338 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none is passed in, so callers can supply their own
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 338 | 1 |
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` into values smaller than, equal to, and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Returns the `index`-th smallest element of `items` (0-based) in expected linear time."""
    # index = len(items) // 2 when trying to find the median
    #   (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
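# Editor's note: typical use of quick_select — the k-th smallest element in
# expected linear time; k = len(items) // 2 picks the (upper) median.
items = [2, 4, 5, 7, 899, 54, 32]
print(quick_select(items, len(items) // 2))  # 7
print(quick_select(items, 0))                # 2, the minimum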
| 189 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
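# Editor's note: hypothetical usage sketch; instantiating the tool downloads the
# NLLB checkpoint on first use, and the call signature below follows the three
# `inputs` declared above (it assumes the standard PipelineTool __call__).
tool = TranslationTool()
print(tool("How are you?", src_lang="English", tgt_lang="French"))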
| 189 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    # tf.int64 below is an editor's reconstruction of a garbled dtype identifier (`tf.intaa`)
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 283 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a [`DecisionTransformerModel`].
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 283 | 1 |
import math


def proth(number: int) -> int:
    """Returns the `number`-th Proth number (3, 5, 9, 13, 17, ...)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0 (2^0, 2^1, ...), +1 to start the sequence at the 3rd Proth number
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 358 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
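# Editor's note: behavior sketch for stochastic depth: each sample in the batch
# is either zeroed entirely or rescaled by 1 / keep_prob, so the expectation is
# preserved. Per-row sums below are therefore 0.0 or 8.0.
import torch

torch.manual_seed(0)
x = torch.ones(8, 4)
out = drop_path(x, drop_prob=0.5, training=True)
print(sorted(set(out.sum(dim=1).tolist())))  # subset of [0.0, 8.0]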
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase : Optional[float] = None ):
'''simple docstring'''
super().__init__()
_snake_case = drop_prob
def A ( self : Any , lowercase : torch.Tensor ):
'''simple docstring'''
return drop_path(lowercase , self.drop_prob , self.training )
def A ( self : Tuple ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowercase : Dict , lowercase : Dict , lowercase : str , lowercase : int , lowercase : Optional[Any] , lowercase : str=None ):
'''simple docstring'''
super().__init__()
_snake_case = patch_size if isinstance(lowercase , collections.abc.Iterable ) else (patch_size, patch_size)
_snake_case = stride if isinstance(lowercase , collections.abc.Iterable ) else (stride, stride)
_snake_case = padding if isinstance(lowercase , collections.abc.Iterable ) else (padding, padding)
_snake_case = nn.Convad(lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase )
_snake_case = norm_layer(lowercase ) if norm_layer else nn.Identity()
def A ( self : int , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.projection(lowercase )
_snake_case = self.norm(lowercase )
return embeddings
class SCREAMING_SNAKE_CASE__ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Dict , lowercase : List[Any] , **lowercase : str ):
'''simple docstring'''
super().__init__(1 , lowercase , **lowercase )
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # token mixing: average pooling minus the identity
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
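# Note on the LayerScale branch above (a sketch of the assumed rationale, following the
# CaiT-style trick that PoolFormer borrows): each residual branch is multiplied by a
# learnable per-channel vector initialised to a small config.layer_scale_init_value, so
# the block starts out close to the identity and deeper stacks train more stably:
#
#   scale = nn.Parameter(1e-5 * torch.ones(num_channels))
#   out = x + scale.unsqueeze(-1).unsqueeze(-1) * branch(x)  # ~= x at initialisation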
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
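# Minimal usage sketch for the model above (assumes network access to the
# sail/poolformer_s12 checkpoint; `image` is any PIL image; illustrative only):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerModel.from_pretrained("sail/poolformer_s12")
#   inputs = image_processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   # outputs.last_hidden_state has shape [1, 512, 7, 7] for a 224x224 input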
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # global average pool over the spatial dimensions, then classify
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 130 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
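# Note: the atol=1e-4 tolerance above is assumed to cover the float32 -> 16-bit PCM ->
# float round trip that writing and re-reading a .wav file with soundfile implies
# (quantization error on the order of 1/32768); exact equality is not expected.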
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type) | 97 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
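# An alternative pagination sketch using tweepy's Cursor helper (assumes the same
# v1.1-style API object; functionally equivalent to the manual max_id loop above):
#
#   alltweets = [
#       tweet for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
#   ]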
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    r"""
    Constructs a BLIP processor which wraps a BERT tokenizer and a BLIP image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
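# Minimal usage sketch (assumes the Salesforce/blip-image-captioning-base checkpoint is
# reachable and `image` is a PIL image; illustrative only, not part of this module):
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")
#   # inputs now holds both `pixel_values` and the tokenized text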
| 360 | """simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
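# Complexity sketch for the relabel-to-front variant implemented above (standard
# analysis, not specific to this file): `relabel` only ever increases a height and
# heights are bounded by 2V - 1, while `push` either saturates an edge or drains the
# vertex's excess; together these bound the work at the classic O(V^3).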
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
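# A compact illustration of what the offset assertions above encode (assumed behavior
# of the byte-level BPE pre-tokenizer): with trim_offsets=True the leading "Ġ" space is
# excluded from a token's character span, with trim_offsets=False it is included. For
# "hello hello" the second token maps to (6, 11) when trimmed and (5, 11) when not.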
| 289 | """simple docstring"""
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
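# Worked example (hypothetical call, consistent with the bounds checked above):
# solve(13, 2) == 1, because 13 = 2**2 + 3**2 is the only way to write 13 as a sum of
# unique squares.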
| 289 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
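# Example invocation (hypothetical file name and paths, shown for illustration only):
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict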
| 367 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
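# A sketch of the lazy-import pattern used above (assumed `_LazyModule` behavior): the
# heavy torch/vision imports only happen when an attribute is first accessed, e.g.
#
#   from transformers.models.bridgetower import BridgeTowerModel  # triggers the real import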
| 47 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # keep the backend normalizer in sync with the requested casing options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
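# Minimal usage sketch (assumes the distilbert-base-uncased checkpoint is reachable;
# illustrative only, not part of the original module):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tokenizer("Hello world", return_tensors="pt")
#   # enc contains input_ids and attention_mask; DistilBERT uses no token_type_ids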
| 114 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # NOTE: this slice is taken from the RGB output, matching how the expected
        # values below were recorded
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase ):
"""simple docstring"""
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
generator = torch.Generator(device=generator_device ).manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def test_ldmad_stable_diffusion( self ) -> Optional[Any]:
ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(torch_device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
output = ldmad_pipe(**inputs )
rgb , depth = output.rgb, output.depth
expected_rgb_mean = 0.4_9_5_5_8_6
expected_rgb_std = 0.3_3_7_9_5_5_1_5
expected_depth_mean = 1_1_2.4_8_5_1_8
expected_depth_std = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def test_ldmad_stable_diffusion_4c( self ) -> Optional[int]:
ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(torch_device )
ldmad_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
output = ldmad_pipe(**inputs )
rgb , depth = output.rgb, output.depth
expected_rgb_mean = 0.4_1_9_4_1_2_7
expected_rgb_std = 0.3_5_3_7_5_5_8_6
expected_depth_mean = 0.5_6_3_8_5_0_2
expected_depth_std = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 248 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_bigbird_pegasus'] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
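# Note: this is the standard transformers lazy-import pattern -- the module body only fills
# _import_structure, and _LazyModule defers the heavy torch imports until a name such as
# BigBirdPegasusModel is first accessed.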
| 164 |
'''simple docstring'''
def binomial_coefficient( n , k ) -> int:
"""simple docstring"""
result = 1  # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
k = n - k
# Calculate C(n,k)
for i in range(k ):
result *= n - i
result //= i + 1
return result
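# Worked check (illustrative): C(5, 2) walks i = 0..1, giving result = 5, then 5 * 4 // 2 = 10
assert binomial_coefficient(5, 2) == 10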
def catalan_number( node_count ) -> int:
"""simple docstring"""
return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n ) -> int:
"""simple docstring"""
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
result = 1
for i in range(1 , n + 1 ):
result *= i
return result
def binary_tree_count( node_count ) -> int:
"""simple docstring"""
return catalan_number(node_count ) * factorial(node_count )
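# Quick self-checks (illustrative, cheap enough to run at import time):
assert catalan_number(3) == 5  # five distinct binary search tree shapes on 3 nodes
assert binary_tree_count(3) == 30  # 5 shapes * 3! = 30 labelled binary trees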
if __name__ == "__main__":
node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
f'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 164 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
_import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests( unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image( self ):
'''simple docstring'''
batch_size = 1
num_channels = 3
sizes = (3_2, 3_2)
image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_cond_unet_upscale( self ):
'''simple docstring'''
torch.manual_seed(0)
model = UNet2DConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def dummy_vae( self ):
'''simple docstring'''
torch.manual_seed(0)
model = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(config)
def lowercase_ ( self : List[str])-> Dict:
'''simple docstring'''
__lowerCAmelCase: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: int = self.dummy_cond_unet_upscale
__lowerCAmelCase: int = DDPMScheduler()
__lowerCAmelCase: List[str] = DDIMScheduler(prediction_type="v_prediction")
__lowerCAmelCase: Tuple = self.dummy_vae
__lowerCAmelCase: Optional[Any] = self.dummy_text_encoder
__lowerCAmelCase: Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((6_4, 6_4))
# make sure here that pndm scheduler skips prk
__lowerCAmelCase: Optional[int] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=3_5_0 , )
__lowerCAmelCase: Tuple = sd_pipe.to(UpperCamelCase__)
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
__lowerCAmelCase: Any = "A painting of a squirrel eating a burger"
__lowerCAmelCase: str = torch.Generator(device=UpperCamelCase__).manual_seed(0)
__lowerCAmelCase: Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase: List[str] = output.images
__lowerCAmelCase: Union[str, Any] = torch.Generator(device=UpperCamelCase__).manual_seed(0)
__lowerCAmelCase: List[str] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=UpperCamelCase__ , )[0]
__lowerCAmelCase: int = image[0, -3:, -3:, -1]
__lowerCAmelCase: Dict = image_from_tuple[0, -3:, -3:, -1]
__lowerCAmelCase: Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
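# the x4 upscaler quadruples spatial resolution, so the 64x64 dummy input must come back as 256x256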
__lowerCAmelCase: List[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self : List[str])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: Dict = self.dummy_cond_unet_upscale
__lowerCAmelCase: List[str] = DDPMScheduler()
__lowerCAmelCase: Union[str, Any] = DDIMScheduler(prediction_type="v_prediction")
__lowerCAmelCase: Optional[int] = self.dummy_vae
__lowerCAmelCase: List[Any] = self.dummy_text_encoder
__lowerCAmelCase: Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((6_4, 6_4))
# make sure here that pndm scheduler skips prk
__lowerCAmelCase: Optional[int] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=3_5_0 , )
__lowerCAmelCase: Optional[int] = sd_pipe.to(UpperCamelCase__)
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
__lowerCAmelCase: List[str] = "A painting of a squirrel eating a burger"
__lowerCAmelCase: List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase: List[Any] = output.images
assert image.shape[0] == 2
__lowerCAmelCase: Dict = torch.Generator(device=UpperCamelCase__).manual_seed(0)
__lowerCAmelCase: Optional[Any] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase: List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.dummy_cond_unet_upscale
__lowerCAmelCase: int = DDPMScheduler()
__lowerCAmelCase: int = DDIMScheduler(prediction_type="v_prediction")
__lowerCAmelCase: Dict = self.dummy_vae
__lowerCAmelCase: int = self.dummy_text_encoder
__lowerCAmelCase: List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((6_4, 6_4))
# put models in fp16, except vae as it overflows in fp16
__lowerCAmelCase: List[Any] = unet.half()
__lowerCAmelCase: List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__lowerCAmelCase: List[Any] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=3_5_0 , )
__lowerCAmelCase: str = sd_pipe.to(UpperCamelCase__)
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = "A painting of a squirrel eating a burger"
__lowerCAmelCase: str = torch.manual_seed(0)
__lowerCAmelCase: Dict = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" , ).images
__lowerCAmelCase: Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_upscale_pipeline( self )-> Tuple:
'''simple docstring'''
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy")
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "a cat sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt , image=image , generator=generator , output_type="np" , )
image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 1e-3
def test_stable_diffusion_upscale_pipeline_fp16( self )-> Any:
'''simple docstring'''
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy")
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipe = StableDiffusionUpscalePipeline.from_pretrained(
model_id , torch_dtype=torch.float16 , )
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "a cat sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt , image=image , generator=generator , output_type="np" , )
image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self )-> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
model_id = "stabilityai/stable-diffusion-x4-upscaler"
pipe = StableDiffusionUpscalePipeline.from_pretrained(
model_id , torch_dtype=torch.float16 , )
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
prompt = "a cat sitting on a park bench"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt , image=image , generator=generator , num_inference_steps=5 , output_type="np" , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 217 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def get_config( model_name ) -> Dict:
'''simple docstring'''
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
conv_layer = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
config = BitConfig(
conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
return config
def rename_key( name ) -> str:
'''simple docstring'''
if "stem.conv" in name:
name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
name = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
name = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
name = """bit.""" + name
if "bit" not in name and "classifier" not in name:
name = """bit.encoder.""" + name
return name
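# Quick sanity checks (illustrative): rename_key("stem.conv.weight") yields
# "bit.embedder.convolution.weight", and rename_key("head.fc.bias") yields "classifier.1.bias".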
def prepare_img( ):
'''simple docstring'''
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ) -> Any:
'''simple docstring'''
config = get_config(model_name )
# load original model from timm
timm_model = create_model(model_name , pretrained=True )
timm_model.eval()
# load state_dict of original model
state_dict = timm_model.state_dict()
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val.squeeze() if """head""" in key else val
# load HuggingFace model
model = BitForImageClassification(config )
model.eval()
model.load_state_dict(state_dict )
# create image processor
transform = create_transform(**resolve_data_config({} , model=timm_model ) )
timm_transforms = transform.transforms
pillow_resamplings = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
processor = BitImageProcessor(
do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
image = prepare_img()
timm_pixel_values = transform(image ).unsqueeze(0 )
pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
with torch.no_grad():
outputs = model(pixel_values )
logits = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.id2label[logits.argmax(-1 ).item()] )
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
lowerCamelCase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 363 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase_ = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w( h , w , scale_factor=8 ):
'''simple docstring'''
new_h = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
new_w = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
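# Worked example (illustrative): with the default scale_factor=8, get_new_h_w(768, 768)
# returns (96, 96), the movq latent grid for a 768x768 image, while get_new_h_w(700, 700)
# rounds up to (88, 88) so the decoded image is never smaller than requested.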
class KandinskyPipeline( DiffusionPipeline ):
def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ) -> Optional[int]:
super().__init__()
self.register_modules(
text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ) -> List[str]:
if latents is None:
latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
latents = latents.to(device )
latents = latents * scheduler.init_noise_sigma
return latents
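# Note (hedged): scaling by scheduler.init_noise_sigma matches the scheduler's expected
# initial noise level; for the DDIM/DDPM schedulers used by this pipeline the factor is 1.0.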
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , ) -> Any:
snake_case_ = len(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else 1
# get prompt text embeddings
snake_case_ = self.tokenizer(
lowerCamelCase , padding="""max_length""" , truncation=lowerCamelCase , max_length=77 , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="""pt""" , )
snake_case_ = text_inputs.input_ids
snake_case_ = self.tokenizer(lowerCamelCase , padding="""longest""" , return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(lowerCamelCase , lowerCamelCase ):
snake_case_ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
snake_case_ = text_input_ids.to(lowerCamelCase )
snake_case_ = text_inputs.attention_mask.to(lowerCamelCase )
snake_case_ , snake_case_ = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
snake_case_ = prompt_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = text_encoder_hidden_states.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = text_mask.repeat_interleave(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
snake_case_ = 42
if negative_prompt is None:
snake_case_ = [""""""] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='''
f''' {type(lowerCamelCase )}.''' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
snake_case_ = negative_prompt
snake_case_ = self.tokenizer(
lowerCamelCase , padding="""max_length""" , max_length=77 , truncation=lowerCamelCase , return_attention_mask=lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors="""pt""" , )
snake_case_ = uncond_input.input_ids.to(lowerCamelCase )
snake_case_ = uncond_input.attention_mask.to(lowerCamelCase )
snake_case_ , snake_case_ = self.text_encoder(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = negative_prompt_embeds.shape[1]
snake_case_ = negative_prompt_embeds.repeat(1 , lowerCamelCase )
snake_case_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase )
snake_case_ = uncond_text_encoder_hidden_states.shape[1]
snake_case_ = uncond_text_encoder_hidden_states.repeat(1 , lowerCamelCase , 1 )
snake_case_ = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , lowerCamelCase , -1 )
snake_case_ = uncond_text_mask.repeat_interleave(lowerCamelCase , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
snake_case_ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
snake_case_ = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase_ ( self , lowerCamelCase=0 ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case_ = torch.device(f'''cuda:{gpu_id}''' )
snake_case_ = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase_ ( self , lowerCamelCase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
snake_case_ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case_ = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
snake_case_ , snake_case_ = cpu_offload_with_hook(lowerCamelCase , lowerCamelCase , prev_module_hook=lowerCamelCase )
if self.safety_checker is not None:
snake_case_ , snake_case_ = cpu_offload_with_hook(self.safety_checker , lowerCamelCase , prev_module_hook=lowerCamelCase )
# We'll offload the last model manually.
snake_case_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self ) -> List[Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 100 , lowerCamelCase = 4.0 , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ) -> Union[str, Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = len(lowerCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}''' )
snake_case_ = self._execution_device
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ , snake_case_ , snake_case_ = self._encode_prompt(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = torch.cat(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
snake_case_ = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
snake_case_ = self.scheduler.timesteps
snake_case_ = self.unet.config.in_channels
snake_case_ , snake_case_ = get_new_h_w(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
snake_case_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
snake_case_ = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
snake_case_ , snake_case_ = noise_pred.chunk(2 )
snake_case_ , snake_case_ = variance_pred.chunk(2 )
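# classifier-free guidance: eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)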
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , ).prev_sample
# post-processing
snake_case_ = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
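# map the decoder output from [-1, 1] to [0, 1], clamp, and convert to NHWC numpy below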
snake_case_ = image * 0.5 + 0.5
snake_case_ = image.clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 34 | 0 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A__ : List[Any] = True
except ImportError:
A__ : List[str] = False
A__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args ) -> Union[str, Any]:
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
@staticmethod
def UpperCAmelCase__ ( snake_case__ : str ):
lowerCamelCase_ : Tuple =parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=UpperCamelCase_ , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=UpperCamelCase_ , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self : str , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any]=None , *snake_case__ : int ):
lowerCamelCase_ : int =testing
lowerCamelCase_ : Optional[int] =testing_file
lowerCamelCase_ : Dict =path
def UpperCAmelCase__ ( self : List[Any] ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase_ : Optional[int] =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(UpperCamelCase_ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
lowerCamelCase_ : Optional[int] =(
Path(UpperCamelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase_ : int =path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase_ ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
lowerCamelCase_ : Union[str, Any] =json.load(UpperCamelCase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase_ , extra_context=UpperCamelCase_ , )
lowerCamelCase_ : List[Any] =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
lowerCamelCase_ : Any =json.load(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] =configuration['''lowercase_modelname''']
lowerCamelCase_ : Union[str, Any] =configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F"""{directory}/configuration.json""" )
lowerCamelCase_ : str ='''PyTorch''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ : str ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ : List[str] ='''Flax''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ : List[Any] =F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=UpperCamelCase_ )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(snake_case__ : Dict ):
with open(UpperCamelCase_ , "r" ) as f:
lowerCamelCase_ : List[str] =f.readlines()
with open(UpperCamelCase_ , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
# Create temp file
lowerCamelCase_ : List[str] =mkstemp()
lowerCamelCase_ : Optional[int] =False
with fdopen(UpperCamelCase_ , "w" ) as new_file:
with open(UpperCamelCase_ ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase_ )
if line_to_copy_below in line:
lowerCamelCase_ : int =True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase_ )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase_ , UpperCamelCase_ )
# Remove original file
remove(UpperCamelCase_ )
# Move new file
move(UpperCamelCase_ , UpperCamelCase_ )
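# Note on the pattern above: replace() rewrites a file near-atomically -- it writes the
# patched content to a mkstemp() temp file, copies the original file's permissions with
# copymode(), removes the original, and moves the temp file into place, so an interrupted
# run cannot leave a half-written target behind.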
def skip_units(snake_case__ : Union[str, Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(snake_case__ : Tuple ):
with open(UpperCamelCase_ ) as datafile:
lowerCamelCase_ : Union[str, Any] =[]
lowerCamelCase_ : int =False
lowerCamelCase_ : Dict =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase_ : List[str] =line.split("\"" )[1]
lowerCamelCase_ : int =skip_units(UpperCamelCase_ )
elif "# Below: " in line and "##" not in line:
lowerCamelCase_ : str =line.split("\"" )[1]
lowerCamelCase_ : Tuple =skip_units(UpperCamelCase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] =[]
elif "# Replace with" in line and "##" not in line:
lowerCamelCase_ : str =[]
elif "##" not in line:
lines_to_copy.append(UpperCamelCase_ )
remove(UpperCamelCase_ )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(UpperCamelCase_ )
| 144 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 97 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 334 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class SegformerFeatureExtractor( SegformerImageProcessor ):
'''simple docstring'''
def __init__( self : int , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ) -> None:
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 334 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests( unittest.TestCase ):
'''simple docstring'''
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
__lowerCAmelCase = load_dataset("ashraq/esc50" )
__lowerCAmelCase = dataset["train"]["audio"][-1]["array"]
__lowerCAmelCase = audio_classifier(__a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(__a ) , [{"score": 0.5_0_1, "label": "Sound of a dog"}, {"score": 0.4_9_9, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def snake_case ( self ):
pass
@slow
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
__lowerCAmelCase = load_dataset("ashraq/esc50" )
__lowerCAmelCase = dataset["train"]["audio"][-1]["array"]
__lowerCAmelCase = audio_classifier(__a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(__a ) , [
{"score": 0.9_9_9, "label": "Sound of a dog"},
{"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
] , )
__lowerCAmelCase = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(__a ) , [
[
{"score": 0.9_9_9, "label": "Sound of a dog"},
{"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
__lowerCAmelCase = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(__a ) , [
[
{"score": 0.9_9_9, "label": "Sound of a dog"},
{"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def snake_case ( self ):
pass
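# Minimal usage sketch (assumption: the checkpoints above are downloadable):
# audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# audio_classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
# returns one {"score": float, "label": str} dict per candidate label, best match first,
# exactly as the nested_simplify() expectations above encode.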
| 57 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor( ProcessorMixin ):
'''simple docstring'''
attributes =["""image_processor""", """tokenizer"""]
image_processor_class ="""Pix2StructImageProcessor"""
tokenizer_class =("""T5Tokenizer""", """T5TokenizerFast""")
def __init__(self ,image_processor ,tokenizer ) -> List[str]:
"""simple docstring"""
tokenizer.return_token_type_ids = False
super().__init__(image_processor ,tokenizer )
def __call__(self ,__lowerCamelCase=None ,__lowerCamelCase = None ,__lowerCamelCase = True ,__lowerCamelCase = False ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = 20_48 ,__lowerCamelCase = 0 ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = True ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowerCAmelCase__ : List[str] = self.tokenizer
lowerCAmelCase__ : List[str] = self.tokenizer(
text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowerCAmelCase__ : int = self.image_processor(
__lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,**__lowerCamelCase )
else:
# add pixel_values and bbox
lowerCAmelCase__ : List[str] = self.image_processor(
__lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,header_text=__lowerCamelCase ,**__lowerCamelCase )
if text is not None and not self.image_processor.is_vqa:
lowerCAmelCase__ : List[str] = self.tokenizer(
text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,)
if "attention_mask" in text_encoding:
lowerCAmelCase__ : List[str] = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
lowerCAmelCase__ : Dict = text_encoding.pop('''input_ids''' )
else:
lowerCAmelCase__ : int = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase ,**__lowerCamelCase )
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.tokenizer.model_input_names
lowerCAmelCase__ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 129 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCAmelCase : int =logging.get_logger(__name__)
class BarkProcessor( ProcessorMixin ):
tokenizer_class = 'AutoTokenizer'
attributes = ['tokenizer']
preset_shape = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self , tokenizer , speaker_embeddings=None ):
"""simple docstring"""
super().__init__(tokenizer )
self.speaker_embeddings = speaker_embeddings
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase="speaker_embeddings_path.json" , **__lowerCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
lowercase = get_file_from_repo(
__lowerCAmelCase , __lowerCAmelCase , subfolder=kwargs.pop("""subfolder""" , __lowerCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , __lowerCAmelCase ) , force_download=kwargs.pop("""force_download""" , __lowerCAmelCase ) , proxies=kwargs.pop("""proxies""" , __lowerCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , __lowerCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , __lowerCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , __lowerCAmelCase ) , revision=kwargs.pop("""revision""" , __lowerCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'`{os.path.join(__lowerCAmelCase , __lowerCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
lowercase = None
else:
with open(__lowerCAmelCase ) as speaker_embeddings_json:
lowercase = json.load(__lowerCAmelCase )
else:
lowercase = None
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
return cls(tokenizer=__lowerCAmelCase , speaker_embeddings=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase="speaker_embeddings_path.json" , __lowerCAmelCase="speaker_embeddings" , __lowerCAmelCase = False , **__lowerCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__lowerCAmelCase , __lowerCAmelCase , """v2""" ) , exist_ok=__lowerCAmelCase )
lowercase = {}
lowercase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase = self._load_voice_preset(__lowerCAmelCase )
lowercase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , __lowerCAmelCase , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=__lowerCAmelCase , )
lowercase = os.path.join(__lowerCAmelCase , f'{prompt_key}_{key}.npy' )
lowercase = tmp_dict
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
super().save_pretrained(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase = None , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = self.speaker_embeddings[voice_preset]
lowercase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
lowercase = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , __lowerCAmelCase ) , cache_dir=kwargs.pop("""cache_dir""" , __lowerCAmelCase ) , force_download=kwargs.pop("""force_download""" , __lowerCAmelCase ) , proxies=kwargs.pop("""proxies""" , __lowerCAmelCase ) , resume_download=kwargs.pop("""resume_download""" , __lowerCAmelCase ) , local_files_only=kwargs.pop("""local_files_only""" , __lowerCAmelCase ) , use_auth_token=kwargs.pop("""use_auth_token""" , __lowerCAmelCase ) , revision=kwargs.pop("""revision""" , __lowerCAmelCase ) , )
if path is None:
raise ValueError(
f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
lowercase = np.load(__lowerCAmelCase )
return voice_preset_dict
def A__ ( self , __lowerCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="pt" , __lowerCAmelCase=256 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase = self._load_voice_preset(__lowerCAmelCase )
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not voice_preset.endswith(""".npz""" ):
lowercase = voice_preset + """.npz"""
lowercase = np.load(__lowerCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(__lowerCAmelCase , **__lowerCAmelCase )
lowercase = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
lowercase = self.tokenizer(
__lowerCAmelCase , return_tensors=__lowerCAmelCase , padding="""max_length""" , max_length=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
if voice_preset is not None:
lowercase = voice_preset
return encoded_text
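
A minimal usage sketch for the processor above — it assumes the public `BarkProcessor` export in transformers and the `suno/bark-small` checkpoint with its bundled voice presets:

from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# `inputs` now holds the padded token ids plus a "history_prompt" BatchFeature
# carrying the semantic/coarse/fine prompt arrays loaded from the preset.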
| 32 | """simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    '''simple docstring'''
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    '''simple docstring'''
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of numbers (int or float)")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 32 | 1 |
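The solver can be sanity-checked against NumPy's reference linear solver (NumPy is assumed available; it is not a dependency of the snippet itself):

import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)  # coefficient matrix
b = np.array([row[-1] for row in eq], dtype=float)   # constants column
expected = np.linalg.solve(a, b)                     # [-1., 0., 1., 2., 3.]
got = solve_simultaneous(eq)
assert np.allclose(got, expected, atol=1e-4), (got, expected)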
'''simple docstring'''
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 97 |
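The `_LazyModule` machinery above defers importing heavy submodules until one of their exported names is actually accessed. A toy stand-in sketch of the same idea — not the transformers implementation — looks like this:

import importlib
import types


class LazyModule(types.ModuleType):
    """Loads a submodule only when one of its exported names is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute -> the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value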
'''simple docstring'''
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a | 97 | 1 |
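The checksum above can be cross-checked against the reference implementation in Python's standard library; `zlib.adler32` works on bytes, so the text is encoded first:

import zlib

sample = "Wikipedia"
assert adler32(sample) == zlib.adler32(sample.encode("utf-8"))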
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """simple docstring"""

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 254 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float tensor of the given shape, as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        '''simple docstring'''
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        '''simple docstring'''
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        '''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 254 | 1 |
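A standalone sketch of the feature-extraction call these tests exercise, substituting a synthetic one-second sine wave for the LibriSpeech samples:

import numpy as np
from transformers import WhisperFeatureExtractor

sampling_rate = 16_000
t = np.linspace(0, 1, sampling_rate, endpoint=False)
audio = 0.5 * np.sin(2 * np.pi * 440.0 * t)  # 1 s of a 440 Hz tone

feature_extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 30 s window
features = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000): the clip is padded out to the 30 s window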
"""simple docstring"""
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # Bolzano's theorem: a sign change is required for a root to be bracketed
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space! equation(a) and equation(b) must have opposite signs.")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 64 |
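The routine above hard-codes `equation`; a generalized sketch that accepts the function and tolerance as parameters makes the same bisection idea reusable:

from typing import Callable


def bisection_general(f: Callable[[float], float], a: float, b: float, tol: float = 1e-6) -> float:
    """Finds a root of f in [a, b], assuming f(a) and f(b) have opposite signs."""
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    while (b - a) >= tol:
        c = (a + b) / 2
        if f(c) == 0.0:
            return c
        if f(c) * f(a) < 0:
            b = c
        else:
            a = c
    return (a + b) / 2


print(bisection_general(lambda x: x**3 - 2 * x - 5, 2, 3))  # ~2.0945515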
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_input_lengths:
__A = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , 2 ).float()
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase_ ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase_ ( self :str , _A :Optional[int] , _A :Dict , _A :Union[str, Any] , _A :List[Any] , _A :str , _A :Union[str, Any] , _A :Optional[Any] , _A :List[str] , _A :Dict , ) -> Any:
'''simple docstring'''
__A = XLMModel(config=_A )
model.to(_A )
model.eval()
__A = model(_A , lengths=_A , langs=_A )
__A = model(_A , langs=_A )
__A = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self :int , _A :List[Any] , _A :List[str] , _A :List[Any] , _A :int , _A :Optional[int] , _A :Optional[Any] , _A :Dict , _A :List[Any] , _A :List[Any] , ) -> List[Any]:
'''simple docstring'''
__A = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
__A = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self :Union[str, Any] , _A :str , _A :List[str] , _A :Union[str, Any] , _A :str , _A :Any , _A :Dict , _A :Any , _A :Union[str, Any] , _A :Optional[Any] , ) -> int:
'''simple docstring'''
__A = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
__A = model(_A )
__A = model(_A , start_positions=_A , end_positions=_A )
__A = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self :Union[str, Any] , _A :Any , _A :Union[str, Any] , _A :str , _A :Dict , _A :Optional[Any] , _A :Union[str, Any] , _A :List[str] , _A :str , _A :Optional[Any] , ) -> int:
'''simple docstring'''
__A = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
__A = model(_A )
__A = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
__A = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((__A) , ) = result_with_labels.to_tuple()
__A = model(_A , start_positions=_A , end_positions=_A )
((__A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase_ ( self :Optional[int] , _A :Optional[Any] , _A :Optional[int] , _A :List[Any] , _A :int , _A :Tuple , _A :Union[str, Any] , _A :List[Any] , _A :List[str] , _A :Dict , ) -> str:
'''simple docstring'''
__A = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
__A = model(_A )
__A = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self :Optional[int] , _A :str , _A :List[str] , _A :Union[str, Any] , _A :Dict , _A :int , _A :Dict , _A :Union[str, Any] , _A :int , _A :Optional[Any] , ) -> List[str]:
'''simple docstring'''
__A = self.num_labels
__A = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self :List[str] , _A :Optional[Any] , _A :List[str] , _A :List[Any] , _A :Union[str, Any] , _A :Any , _A :List[str] , _A :Optional[Any] , _A :Any , _A :Tuple , ) -> List[Any]:
'''simple docstring'''
__A = self.num_choices
__A = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
__A = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) = config_and_inputs
__A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def lowercase_ ( self :Dict ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self :List[Any] ) -> Any:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def lowercase_ ( self :str ) -> List[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def lowercase_ ( self :Any ) -> Tuple:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def lowercase_ ( self :str ) -> str:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def lowercase_ ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def lowercase_ ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def lowercase_ ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def lowercase_ ( self :Any , _A :str , _A :str , _A :int , _A :Optional[int] , _A :Any , _A :List[Any]=False , _A :Dict=1 ) -> Optional[int]:
'''simple docstring'''
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
__A = min_length + idx + 1
__A = min_length + idx + 1
__A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def lowercase_ ( self :Optional[Any] , _A :str , _A :List[Any] , _A :str , _A :str , _A :int , _A :Union[str, Any]=False , _A :Optional[Any]=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
__A = min_length + idx + 1
__A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        '''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president ... (repeated)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 161 | 0 |
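A minimal inference sketch along the lines of the slow test above; the checkpoint name comes from the test itself, the rest follows the standard transformers API:

import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
model.eval()

input_ids = tokenizer("the president", return_tensors="pt").input_ids
with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))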
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, usually in [0.04, 0.06]
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value; expected 0.04 or 0.06")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the constant supplied at construction time
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 255 |
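A self-contained way to exercise the detector without an external photo: render a synthetic checkerboard (whose square corners give strong Harris responses), write it to disk, and run `detect` on it:

import cv2
import numpy as np

# 8x8 checkerboard with 32-pixel squares
tile = np.kron([[0, 1] * 4, [1, 0] * 4] * 4, np.ones((32, 32))) * 255
cv2.imwrite("checkerboard.png", tile.astype(np.uint8))

detector = HarrisCorner(0.04, 3)
color_img, corners = detector.detect("checkerboard.png")
print(f"found {len(corners)} corner pixels")
cv2.imwrite("checkerboard_corners.png", color_img)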
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 255 | 1 |
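Once a checkpoint is converted (or using an already-converted Hub checkpoint such as `facebook/sam-vit-base`, assumed here), inference mirrors the script's sanity checks:

import numpy as np
import requests
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-base").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[400, 650]]]  # one (x, y) prompt for one image

inputs = processor(images=np.array(raw_image), input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.iou_scores.squeeze())  # predicted mask qualities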
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[int] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class __a ( A__ ):
_lowerCAmelCase : int = ['''pixel_values''']
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_55 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Tuple , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase__ : int = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE , param_name="crop_size" )
UpperCamelCase__ : Optional[Any] = do_resize
UpperCamelCase__ : Dict = size
UpperCamelCase__ : str = resample
UpperCamelCase__ : Optional[int] = do_center_crop
UpperCamelCase__ : Any = crop_size
UpperCamelCase__ : str = do_rescale
UpperCamelCase__ : List[str] = rescale_factor
UpperCamelCase__ : Optional[int] = do_normalize
UpperCamelCase__ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase__ : Union[str, Any] = do_convert_rgb
def __lowercase ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCamelCase__ : Optional[int] = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[int, float] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
    '''Preprocess an image or batch of images into model-ready pixel values.'''
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size , param_name="size" , default_to_square=False )
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
    images = make_list_of_images(images )
    if not valid_images(images ):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray." )
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True." )
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True." )
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True." )
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True." )
    # PIL RGBA images are converted to RGB
    if do_convert_rgb:
        images = [convert_to_rgb(image ) for image in images]
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image ) for image in images]
    if do_resize:
        images = [self.resize(image=image , size=size , resample=resample ) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image , size=crop_size ) for image in images]
    if do_rescale:
        images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
    if do_normalize:
        images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
    images = [to_channel_dimension_format(image , data_format ) for image in images]
    data = {"pixel_values": images}
    return BatchFeature(data=data , tensor_type=return_tensors )
 | 189 |
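A minimal numeric sketch of the pipeline above (my addition, not part of the original file): with the usual defaults, `rescale_factor` is 1/255 and normalization subtracts the model's per-channel mean and divides by its std, so the core arithmetic for one pixel is:

import numpy as np
pixel = np.float32(128.0)
rescaled = pixel * (1 / 255)           # rescale step: uint8 range -> [0, 1]
normalized = (rescaled - 0.5) / 0.5    # normalize step with example mean/std of 0.5
print(normalized)                      # ~0.0039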
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the sigmoid of the input, 1 / (1 + exp(-x)).
    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of the GELU activation: x * sigmoid(1.702 * x).
    >>> gaussian_error_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
 | 189 | 1 |
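A quick numeric check of the approximation above (my addition; standalone snippet): sigmoid(1.702 * x) tracks the Gaussian CDF closely, so the unit vanishes at 0 and approaches the identity for large positive inputs.

import numpy as np
x = np.array([-1.0, 0.0, 1.0])
print(x * (1 / (1 + np.exp(-1.702 * x))))  # approximately [-0.1542, 0.0, 0.8458]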
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological ordering of a DAG given as an
    adjacency list, or report that a cycle exists."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)  # prints [0, 1, 2, 3, 4, 5]
| 343 |
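A quick check of the cycle-detection path (my example, using the fixed `topological_sort` above): a cycle leaves some indegrees nonzero, so fewer than len(graph) vertices are popped and "Cycle exists" is printed.

cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints: Cycle exists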
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
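# Illustrative only (my addition, not part of the original script): the
# permutation keeps the overall shape but reorders rows so the three
# query/key/value splits become contiguous, which is what HuggingFace GPT-2
# expects after the final transpose.
def _demo_fix_qkv_ordering():
    qkv = torch.randn(3 * 2 * 4, 8)  # (num_splits * num_heads * head_dim, in_features)
    out = fix_query_key_value_ordering(qkv, 2.0, 3, 2, 4)
    assert out.shape == qkv.shape  # same shape, permuted layout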
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096,
            activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
            layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True,
            summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
            scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
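# Example invocation (illustrative paths, not from the original file):
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/checkpoint.zip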
| 343 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
    from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)
    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
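# A minimal usage sketch (my addition; downloads the SpeechT5 checkpoints on
# first use, so it is left commented out):
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool("Hello world")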
| 93 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of image embeddings, used to scale
    embeddings into and out of the model's latent space."""
    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))
    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self
    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
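# Quick round-trip check (my addition): scaling then unscaling recovers the input.
def _demo_normalizer_round_trip():
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)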
| 130 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
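# Illustrative only (my addition): token-level NLLs with two cutoffs. With
# labels, `forward` shifts by one position, so the output has
# batch_size * (seq_len - 1) entries.
def _demo_adaptive_softmax():
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(2, 5, 32)
    labels = torch.randint(0, 1000, (2, 5))
    nll = crit(hidden, labels)
    assert nll.shape == (2 * 4,)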
 | 366 |
import os
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files)
| 165 | 0 |
"""
Project Euler Problem 48: Self powers.
Find the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000.
"""
def solution() -> str:
    """
    Returns the last 10 digits of 1^1 + 2^2 + ... + 1000^1000.
    >>> solution()
    '9110846700'
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
    print(solution())
| 31 |
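A variant of the same computation (my addition) keeps only the last ten digits during the sum via three-argument pow, avoiding the huge exact integers:

MOD = 10**10
print(str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10))  # 9110846700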
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
 | 32 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
 | 110 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True, ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward
            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group", ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)
                return custom_forward
            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class lowercase ( nn.Module ):
def __init__( self , A_ , A_ , A_ , A_=None , A_="random" , A_=False , A_=True ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = n_e
UpperCamelCase = vq_embed_dim
UpperCamelCase = beta
UpperCamelCase = legacy
UpperCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCamelCase = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
UpperCamelCase = self.used.shape[0]
UpperCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase = self.re_embed
UpperCamelCase = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
UpperCamelCase = n_e
UpperCamelCase = sane_index_shape
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = inds.shape
assert len(A_ ) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1 )
UpperCamelCase = self.used.to(A_ )
UpperCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase = match.argmax(-1 )
UpperCamelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCamelCase = self.unknown_index
return new.reshape(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = inds.shape
assert len(A_ ) > 1
UpperCamelCase = inds.reshape(ishape[0] , -1 )
UpperCamelCase = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase = 0 # simply set to zero
UpperCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , A_ )
return back.reshape(A_ )
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
# reshape z -> (batch, height, width, channel) and flatten
UpperCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCamelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase = torch.argmin(torch.cdist(A_ , self.embedding.weight ) , dim=1 )
UpperCamelCase = self.embedding(A_ ).view(z.shape )
UpperCamelCase = None
UpperCamelCase = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCamelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCamelCase = self.remap_to_used(A_ )
UpperCamelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , A_ , A_ ) -> Optional[int]:
"""simple docstring"""
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCamelCase = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCamelCase = self.unmap_to_all(A_ )
UpperCamelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCamelCase = self.embedding(A_ )
if shape is not None:
UpperCamelCase = z_q.view(A_ )
# reshape back to match original input shape
UpperCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
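# A minimal sketch of the quantization step the class above implements:
# nearest-codebook lookup followed by the straight-through gradient estimator.
# The sizes and the helper name below are illustrative only, assuming the
# torch / torch.nn imports at the top of this file.
def _vq_sketch():
    codebook = nn.Embedding(16, 4)  # 16 codes of dimension 4
    codebook.weight.data.uniform_(-1.0 / 16, 1.0 / 16)
    z = torch.randn(8, 4)  # 8 flattened latent vectors
    indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
    z_q = codebook(indices)
    commit_loss = torch.mean((z_q.detach() - z) ** 2)  # commitment term
    z_q = z + (z_q - z).detach()  # gradients flow straight through to z
    return z_q, commit_loss, indices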
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_=False ) -> Any:
"""simple docstring"""
UpperCamelCase = parameters
UpperCamelCase , UpperCamelCase = torch.chunk(A_ , 2 , dim=1 )
UpperCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCamelCase = deterministic
UpperCamelCase = torch.exp(0.5 * self.logvar )
UpperCamelCase = torch.exp(self.logvar )
if self.deterministic:
UpperCamelCase = UpperCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , A_ = None ) -> torch.FloatTensor:
"""simple docstring"""
# make sure sample is on the same device as the parameters and has same dtype
UpperCamelCase = randn_tensor(
self.mean.shape , generator=A_ , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCamelCase = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , A_=None ) -> Tuple:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , A_ , A_=[1, 2, 3] ) -> Optional[Any]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
UpperCamelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.mean
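# What the distribution above computes, in compact form: a reparameterized
# sample x = mean + std * eps with eps ~ N(0, I), and, against a standard
# normal, the closed-form KL = 0.5 * sum(mean^2 + var - 1 - logvar).
# A small self-contained sketch with illustrative shapes (names are ours):
def _diag_gaussian_sketch():
    mean = torch.zeros(1, 4, 8, 8)
    logvar = torch.zeros(1, 4, 8, 8)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(mean)  # reparameterization trick
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])
    return sample, kl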
| 110 | 1 |
"""simple docstring"""
def _snake_case ( number : int ):
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
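# A quick sanity check of our own (not part of the original doctests): the loop
# implements the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1), so successive
# calls yield the Catalan numbers 1, 1, 2, 5, 14, 42, ...
def _catalan_sketch():
    assert [_snake_case(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]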
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class A__ ( Generic[KT, VT] ):
def __init__( self : str , _a : KT | str = "root" , _a : VT | None = None ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =key
_SCREAMING_SNAKE_CASE =value
_SCREAMING_SNAKE_CASE =[]
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return f"Node({self.key}: {self.value})"
@property
def A ( self : int ) -> int:
'''simple docstring'''
return len(self.forward )
class A__ ( Generic[KT, VT] ):
def __init__( self : Optional[Any] , _a : float = 0.5 , _a : int = 16 ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Node[KT, VT]()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =p
_SCREAMING_SNAKE_CASE =max_level
def __str__( self : Tuple ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =list(self )
if len(_a ) == 0:
return f"SkipList(level={self.level})"
_SCREAMING_SNAKE_CASE =max((len(str(_a ) ) for item in items) , default=4 )
_SCREAMING_SNAKE_CASE =max(_a , 4 ) + 4
_SCREAMING_SNAKE_CASE =self.head
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =node.forward.copy()
lines.append(f"[{node.key}]".ljust(_a , '-' ) + '* ' * len(_a ) )
lines.append(' ' * label_size + '| ' * len(_a ) )
while len(node.forward ) != 0:
_SCREAMING_SNAKE_CASE =node.forward[0]
lines.append(
f"[{node.key}]".ljust(_a , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(_a ) )
_SCREAMING_SNAKE_CASE =node.forward
lines.append('None'.ljust(_a ) + '* ' * len(_a ) )
return f"SkipList(level={self.level})\n" + "\n".join(_a )
def __iter__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_SCREAMING_SNAKE_CASE =node.forward[0]
def A ( self : List[Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =1
while random() < self.p and level < self.max_level:
level += 1
return level
def A ( self : Any , _a : Any ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_SCREAMING_SNAKE_CASE =node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_a )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def A ( self : Union[str, Any] , _a : KT ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._locate_node(_a )
if node is not None:
for i, update_node in enumerate(_a ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_SCREAMING_SNAKE_CASE =node.forward[i]
else:
_SCREAMING_SNAKE_CASE =update_node.forward[:i]
def A ( self : Optional[Any] , _a : KT , _a : VT ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._locate_node(_a )
if node is not None:
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _a ):
update_vector.append(self.head )
_SCREAMING_SNAKE_CASE =level
_SCREAMING_SNAKE_CASE =Node(_a , _a )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_a )
else:
_SCREAMING_SNAKE_CASE =new_node
def A ( self : List[str] , _a : VT ) -> VT | None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._locate_node(_a )
if node is not None:
return node.value
return None
def _lowerCAmelCase ( ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
_SCREAMING_SNAKE_CASE =skip_list.head
_SCREAMING_SNAKE_CASE ={}
while node.level != 0:
_SCREAMING_SNAKE_CASE =node.forward[0]
_SCREAMING_SNAKE_CASE =node.value
assert len(_UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
_SCREAMING_SNAKE_CASE =skip_list.head
_SCREAMING_SNAKE_CASE ={}
while node.level != 0:
_SCREAMING_SNAKE_CASE =node.forward[0]
_SCREAMING_SNAKE_CASE =node.value
if len(_UpperCamelCase ) != 4:
print()
assert len(_UpperCamelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def _lowerCAmelCase ( ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
assert skip_list.find('Some key' ) is None
def _lowerCAmelCase ( ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def _lowerCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def _lowerCAmelCase ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 1_42 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(_UpperCamelCase : Dict ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(_UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
def is_sorted(_UpperCamelCase : str ):
return all(next_item >= item for item, next_item in zip(_UpperCamelCase , lst[1:] ) )
_SCREAMING_SNAKE_CASE =SkipList()
for i in range(10 ):
skip_list.insert(_UpperCamelCase , _UpperCamelCase )
assert is_sorted(list(_UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(_UpperCamelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(_UpperCamelCase ) )
def _lowerCAmelCase ( ) -> List[str]:
"""simple docstring"""
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 47 | 0 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets ( screen_name : str ) ->None:
    """simple docstring"""
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 350 |
"""simple docstring"""
def lowercase ( ) ->int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
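# For reference, the unique Pythagorean triplet with a + b + c == 1000 is
# (a, b, c) = (200, 375, 425): 200**2 + 375**2 == 425**2 == 180_625, so the
# value returned above is 200 * 375 * 425 == 31_875_000.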
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
def _A ( n ):
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
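# Example of our own: 360 == 2**3 * 3**2 * 5, so the factors come out in
# non-decreasing order.
def _prime_factors_sketch():
    assert _A(360) == [2, 2, 2, 3, 3, 5]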
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/speecht5_asr": 1_024,
"microsoft/speecht5_tts": 1_024,
"microsoft/speecht5_vc": 1_024,
}
class A ( __UpperCAmelCase ):
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.sp_model.get_piece_size()
def A__ ( self ) -> int:
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.sp_model.piece_to_id(lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ ) -> int:
'''simple docstring'''
lowercase__ = self.sp_model.IdToPiece(lowerCamelCase__ )
return token
def A__ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
lowercase__ = []
lowercase__ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
lowercase__ = []
else:
current_sub_tokens.append(lowerCamelCase__ )
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
lowercase__ = [1]
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + suffix_ones
return ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , """wb""" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
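# The two sequence helpers above follow a simple convention: a single EOS is
# appended to every sequence, and the special-tokens mask marks only that
# trailing token (upstream these correspond to build_inputs_with_special_tokens
# and get_special_tokens_mask). A plain-Python illustration with made-up ids:
def _special_tokens_sketch():
    token_ids = [5, 9, 23]
    eos_token_id = 2
    with_special = token_ids + [eos_token_id]  # append EOS
    mask = [0] * len(token_ids) + [1]  # 1 marks the special token
    assert with_special == [5, 9, 23, 2] and mask == [0, 0, 0, 1]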
| 164 | 1 |
"""simple docstring"""
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def _lowerCAmelCase ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x ) for x in param.args ) )
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
self.run_and_check(
stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
def snake_case_ ( self , lowerCAmelCase__):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1_0 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ):
__SCREAMING_SNAKE_CASE = models[model]
__SCREAMING_SNAKE_CASE = self.run_trainer(
stage=lowerCAmelCase__ , model_name=lowerCAmelCase__ , eval_steps=lowerCAmelCase__ , num_train_epochs=1 , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
self.do_checks(lowerCAmelCase__)
return output_dir
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1_0 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ):
__SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir("""./xxx""" , after=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = f"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCAmelCase__)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
if fpaa:
args.extend(["""--fp16"""])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__SCREAMING_SNAKE_CASE = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
__SCREAMING_SNAKE_CASE = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
__SCREAMING_SNAKE_CASE = self.get_launcher(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCAmelCase__ , env=self.get_env())
return output_dir
def snake_case_ ( self , lowerCAmelCase__=False):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
__SCREAMING_SNAKE_CASE = min(2 , get_gpu_count()) if distributed else 1
return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
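# For example, on a 2-GPU machine get_launcher(distributed=True) produces
# ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"], which is prepended to
# the script path and its arguments before execute_subprocess_async runs it.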
| 354 |
"""simple docstring"""
def euclidean_gcd ( a , b ):
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive ( a , b ):
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ( ):
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 255 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = 'deberta-v2'
def __init__( self: Optional[Any] , UpperCamelCase_: Union[str, Any]=12_81_00 , UpperCamelCase_: Optional[int]=15_36 , UpperCamelCase_: str=24 , UpperCamelCase_: Optional[Any]=24 , UpperCamelCase_: int=61_44 , UpperCamelCase_: Dict="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Tuple=1E-7 , UpperCamelCase_: List[Any]=False , UpperCamelCase_: Any=-1 , UpperCamelCase_: Tuple=0 , UpperCamelCase_: str=True , UpperCamelCase_: Any=None , UpperCamelCase_: List[Any]=0 , UpperCamelCase_: str="gelu" , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = relative_attention
__lowerCamelCase = max_relative_positions
__lowerCamelCase = pad_token_id
__lowerCamelCase = position_biased_input
# Backwards compatibility
if type(UpperCamelCase_ ) == str:
__lowerCamelCase = [x.strip() for x in pos_att_type.lower().split("""|""" )]
__lowerCamelCase = pos_att_type
__lowerCamelCase = vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = kwargs.get("""pooler_hidden_size""" , UpperCamelCase_ )
__lowerCamelCase = pooler_dropout
__lowerCamelCase = pooler_hidden_act
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: int ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowerCAmelCase__ ( self: Any ):
return 12
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional["TensorType"] = None , UpperCamelCase_: int = 3 , UpperCamelCase_: int = 40 , UpperCamelCase_: int = 40 , UpperCamelCase_: "PreTrainedTokenizerBase" = None , ):
__lowerCamelCase = super().generate_dummy_inputs(preprocessor=UpperCamelCase_ , framework=UpperCamelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
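# For illustration: with type_vocab_size > 0 and a non-multiple-choice task, the
# dynamic-axes property above resolves to
#   {"input_ids": {0: "batch", 1: "sequence"},
#    "attention_mask": {0: "batch", 1: "sequence"},
#    "token_type_ids": {0: "batch", 1: "sequence"}}
# which is what an ONNX exporter needs to keep batch and sequence lengths dynamic.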
| 12 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A =logging.get_logger(__name__)
class _a ( __a ):
__a : str = ["""pixel_values"""]
def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase )
def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ):
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(lowercase ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ):
'''simple docstring'''
UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowercase ):
UpperCAmelCase = target_sizes.numpy()
UpperCAmelCase = []
for idx in range(len(lowercase ) ):
UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase )
UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
UpperCAmelCase = logits.argmax(dim=1 )
UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
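# A minimal sketch of the numeric core of the preprocessing method above:
# rescale pixel values to [0, 1], then standardize per channel. The 0.5
# constants are the IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD defaults
# used above; the helper name and shapes are ours.
def _normalize_sketch():
    image = np.random.randint(0, 256, size=(3, 224, 224)).astype("float32")
    image = image * (1 / 255)  # rescale
    mean = np.array([0.5, 0.5, 0.5])[:, None, None]
    std = np.array([0.5, 0.5, 0.5])[:, None, None]
    return (image - mean) / std  # normalize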
| 34 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=_A ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
# If no defaults are changed, `to_kwargs` returns an empty dict.
__UpperCAmelCase : Union[str, Any] = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__UpperCAmelCase : int = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__UpperCAmelCase : Union[str, Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _A )
@require_multi_gpu
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ : Any = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase__ : Any = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase__ : List[str] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase__ : int = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase__ : int = ""
lowerCAmelCase__ : Tuple = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 356 |
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( nums ):
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 | 0 |
import random
def partition ( a : list , left_index : int , right_index : int ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random ( a : list , left : int , right : int ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main ( ) -> None:
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    unsorted = [int(item ) for item in user_input.split("," )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )
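# A deterministic usage example of our own: the sort is in place and the upper
# bound passed to quick_sort_random is exclusive.
def _quick_sort_sketch():
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 1, 2, 3, 4, 5, 6, 9]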
if __name__ == "__main__":
main()
| 300 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , **snake_case :str ):
'''simple docstring'''
A_ : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case )
return config
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config()
A_ : int = scheduler_class(**snake_case )
A_ : Tuple = len(snake_case )
A_ : List[str] = self.dummy_model()
A_ : Optional[Any] = self.dummy_sample_deter
A_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Tuple = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : Optional[int] = pred_prev_sample
A_ : Tuple = torch.sum(torch.abs(snake_case ) )
A_ : str = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ : Optional[int] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config(prediction_type="v_prediction" )
A_ : List[str] = scheduler_class(**snake_case )
A_ : int = len(snake_case )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Any = torch.manual_seed(0 )
for t in reversed(range(snake_case ) ):
# 1. predict noise residual
A_ : Optional[int] = model(snake_case , snake_case )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A_ : List[str] = pred_prev_sample
A_ : Optional[Any] = torch.sum(torch.abs(snake_case ) )
A_ : List[str] = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Dict = scheduler_class(**snake_case )
A_ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case )
A_ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(snake_case ):
if i == len(snake_case ) - 1:
A_ : str = -1
else:
A_ : List[str] = timesteps[i + 1]
A_ : Optional[int] = scheduler.previous_timestep(snake_case )
A_ : List[str] = prev_t.item()
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**snake_case )
A_ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Any = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Union[str, Any] = [100, 87, 50, 1, 0]
A_ : Optional[int] = len(snake_case )
with self.assertRaises(snake_case , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**snake_case )
A_ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=snake_case )
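# The reference values asserted in the _get_variance test above follow the DDPM
# posterior variance beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t.
# A standalone sketch of that computation (our own helper, assuming the linear
# beta schedule implied by the default config above):
def _ddpm_variance_sketch():
    betas = torch.linspace(0.0001, 0.02, 1_000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    def variance(t):
        prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
        return (1 - prev) / (1 - alphas_cumprod[t]) * betas[t]
    assert abs(variance(0).item() - 0.0) < 1e-5
    assert abs(variance(487).item() - 0.00979) < 1e-5
    assert abs(variance(999).item() - 0.02) < 1e-5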
| 300 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample( wav : np.ndarray , max_length : float , sample_rate : int = 1_60_00 ) -> np.ndarray:
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
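# Example of our own: crop a 3-second window from a 10-second, 16 kHz waveform.
def _random_subsample_sketch():
    wav = np.zeros(10 * 16_000, dtype=np.float32)
    clip = random_subsample(wav, max_length=3.0, sample_rate=16_000)
    assert len(clip) == 3 * 16_000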
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(model_input_name )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(model_input_name )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
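# NOTE: `random_subsample` (used in train_transforms above) is defined earlier in
# this script and not shown here. A minimal sketch of its likely behavior, for
# reference only — the exact signature is an assumption:
#
#   from random import randint
#
#   def random_subsample(wav, max_length, sample_rate=16_000):
#       """Randomly pick a chunk of `max_length` seconds from `wav`."""
#       sample_length = int(round(sample_rate * max_length))
#       if len(wav) <= sample_length:
#           return wav
#       random_offset = randint(0, len(wav) - sample_length - 1)
#       return wav[random_offset : random_offset + sample_length]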
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , label2id=_UpperCamelCase , id2label=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main() | 31 |
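# Example invocation of the script above (a sketch — model and dataset names are
# illustrative, not prescriptive):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --freeze_feature_encoder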
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
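# Quick illustration of the reshapes performed above (a standalone sketch, not
# part of the conversion code): PyTorch stores Linear weights as (out, in) and
# Conv2d weights as (out, in, kH, kW); Flax expects (in, out) kernels and
# (kH, kW, in, out) conv kernels.
import numpy as np

linear_w = np.zeros((8, 4))                     # PyTorch Linear: (out_features, in_features)
assert linear_w.T.shape == (4, 8)               # Flax kernel: (in_features, out_features)

conv_w = np.zeros((16, 3, 5, 5))                # PyTorch Conv2d: (O, I, kH, kW)
assert conv_w.transpose(2, 3, 1, 0).shape == (5, 5, 3, 16)  # Flax: (kH, kW, I, O)

# The `[-3::2]` slice used for weight_norm takes every other element of the last three:
key = ("conv", "parametrizations", "weight", "original0")
assert key[-3::2] == ("parametrizations", "original0")  # -> renamed to "weight_g"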
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
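# For reference, the flatten/unflatten round-trip that the conversion above relies
# on (a self-contained sketch using the flax utilities imported at the top):
from flax.traverse_util import flatten_dict, unflatten_dict

nested = {"dense": {"kernel": 1, "bias": 2}}
flat = flatten_dict(nested)  # {("dense", "kernel"): 1, ("dense", "bias"): 2}
assert unflatten_dict(flat) == nested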
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load each shard with torch.load
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(transformers , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model | 31 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Union[str, Any] = '''marian'''
snake_case__ : Dict = ['''past_key_values''']
snake_case__ : Union[str, Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=5_8_1_0_1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=4_0_9_6 , SCREAMING_SNAKE_CASE__ : List[Any]=1_6 , SCREAMING_SNAKE_CASE__ : List[str]=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=4_0_9_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : str=5_8_1_0_0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_8_1_0_0 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Union[str, Any]:
a_ : Any = vocab_size
a_ : int = decoder_vocab_size or vocab_size
a_ : List[str] = max_position_embeddings
a_ : List[Any] = d_model
a_ : int = encoder_ffn_dim
a_ : List[str] = encoder_layers
a_ : Optional[Any] = encoder_attention_heads
a_ : List[str] = decoder_ffn_dim
a_ : Any = decoder_layers
a_ : List[Any] = decoder_attention_heads
a_ : List[Any] = dropout
a_ : Union[str, Any] = attention_dropout
a_ : Any = activation_dropout
a_ : List[str] = activation_function
a_ : str = init_std
a_ : int = encoder_layerdrop
a_ : Union[str, Any] = decoder_layerdrop
a_ : Any = use_cache
a_ : str = encoder_layers
a_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
a_ : Tuple = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , forced_eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
a_ : Tuple = {0: 'batch'}
a_ : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a_ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
a_ : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
a_ : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
a_ , a_ : Any = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
a_ : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
a_ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
a_ : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ : Optional[Any] = super().outputs
else:
a_ : Optional[int] = super(SCREAMING_SNAKE_CASE__ , self ).outputs
if self.use_past:
a_ , a_ : Optional[int] = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
a_ : Optional[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
a_ : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
a_ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Generate decoder inputs
a_ : Tuple = seq_length if not self.use_past else 1
a_ : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
a_ : Tuple = dict(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
a_ , a_ : int = common_inputs['input_ids'].shape
a_ : Any = common_inputs['decoder_input_ids'].shape[1]
a_ , a_ : List[str] = self.num_attention_heads
a_ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a_ : Tuple = decoder_seq_length + 3
a_ : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a_ : Optional[Any] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] , dim=1 )
a_ : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a_ , a_ : Tuple = self.num_layers
a_ : List[Any] = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) - min_num_layers
a_ : List[Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
) )
# TODO: test this.
a_ : List[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
a_ : int = self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
a_ , a_ : Tuple = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
a_ : List[Any] = seqlen + 2
a_ , a_ : Optional[int] = self.num_layers
a_ , a_ : str = self.num_attention_heads
a_ : List[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a_ : Tuple = common_inputs['attention_mask'].dtype
a_ : Dict = torch.cat(
[common_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
a_ : Dict = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a_ : Dict = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a_ : str = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
a_ : Optional[Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
a_ : Dict = dict(tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
a_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
else:
a_ : Dict = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple:
if self.task in ["default", "seq2seq-lm"]:
a_ : Dict = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
a_ : Optional[int] = super(SCREAMING_SNAKE_CASE__ , self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1E-4
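# Usage sketch for the configuration defined above — assuming it is exposed as
# `MarianConfig` in transformers (values here are illustrative):
from transformers import MarianConfig

config = MarianConfig(vocab_size=58_101, d_model=1_024, encoder_layers=12)
assert config.hidden_size == config.d_model  # "hidden_size" is mapped onto "d_model" via attribute_map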
| 32 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
"""simple docstring"""
a_ : Optional[Any] = HfArgumentParser(TensorFlowBenchmarkArguments )
a_ : Optional[int] = parser.parse_args_into_dataclasses()[0]
a_ : List[Any] = TensorFlowBenchmark(args=benchmark_args )
try:
a_ : List[str] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a_ : Dict = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
a_ : Dict = ' '.join(str(e ).split(' ' )[:-1] )
a_ : int = ''
a_ : int = eval(str(e ).split(' ' )[-1] )  # the parser error message is expected to end with a list literal of the deprecated args
a_ : Any = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(arg )
if len(wrong_args ) > 0:
a_ : str = full_error_msg + begin_error_msg + str(wrong_args )
raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
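# Sketch of the error-message parsing in main() above — it assumes the parser's
# ValueError ends with a single-token Python list literal of the unknown args:
_msg = "Some specified arguments are not used by the HfArgumentParser: ['--no_cuda']"
_begin = " ".join(_msg.split(" ")[:-1])   # everything before the list literal
_args = eval(_msg.split(" ")[-1])         # parse the trailing list literal
assert _args == ["--no_cuda"]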
| 32 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCamelCase ( A__ , A__ ) -> List[str]:
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __lowerCamelCase ( A__ , A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
UpperCamelCase = features.copy()
UpperCamelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCamelCase = jsonl_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCamelCase = [jsonl_path]
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowerCamelCase ( A__ , A__ , A__=("train",) ) -> Tuple:
"""simple docstring"""
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase = JsonDatasetReader({'train': jsonl_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = features.copy() if features else default_expected_features
UpperCamelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase = JsonDatasetReader({'train': jsonl_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __lowerCamelCase ( A__ , A__ , A__ ) -> int:
"""simple docstring"""
if split:
UpperCamelCase = {split: jsonl_path}
else:
UpperCamelCase = 'train'
UpperCamelCase = {'train': jsonl_path, 'test': jsonl_path}
UpperCamelCase = tmp_path / 'cache'
UpperCamelCase = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCamelCase ( A__ ) -> Tuple:
"""simple docstring"""
return json.load(lowerCAmelCase__ )
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
return [json.loads(line ) for line in buffer]
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def A ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ ).write()
buffer.seek(0 )
UpperCamelCase = load_json_function(A__ )
assert isinstance(A__ , A__ )
assert isinstance(exported_content[0] , A__ )
assert len(A__ ) == 1_0
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ ).write()
buffer.seek(0 )
UpperCamelCase = load_json(A__ )
assert isinstance(A__ , A__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(A__ ) == 1_0
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def A ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase = load_json_function(A__ )
assert isinstance(A__ , A__ )
assert isinstance(exported_content[0] , A__ )
assert len(A__ ) == 1_0
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def A ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase = load_json(A__ )
assert isinstance(A__ , A__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(A__ ) == 1_0
def A ( self : List[Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with pytest.raises(A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def A ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
UpperCamelCase = tmp_path_factory.mktemp('data' ) / f"""test.json.{extension}"""
UpperCamelCase = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(A__ , A__ , compression=A__ ).write()
with fsspec.open(A__ , 'rb' , compression='infer' ) as f:
UpperCamelCase = f.read()
with fsspec.open(A__ , 'rb' , compression='infer' ) as f:
UpperCamelCase = f.read()
assert exported_content == original_content
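# End-to-end sketch of the reader/writer pair exercised by these tests, using
# the public datasets API (paths are illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   ds.to_json("out.jsonl", lines=True)        # newline-delimited JSON
#   round_tripped = Dataset.from_json("out.jsonl")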
| 359 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase , UpperCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
UpperCamelCase = np.array(A__ ).astype(np.float32 ) / 255.0
UpperCamelCase = image[None].transpose(0 , 3 , 1 , 2 )
UpperCamelCase = torch.from_numpy(A__ )
return 2.0 * image - 1.0
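# preprocess() above maps pixel values into [-1.0, 1.0]; a quick standalone check:
import numpy as np

pixels = np.array([0, 127.5, 255], dtype=np.float32) / 255.0
scaled = 2.0 * pixels - 1.0
assert scaled.min() == -1.0 and scaled.max() == 1.0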
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase__ : VQModel , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : Optional[int] = 1_0_0 , UpperCamelCase__ : Optional[float] = 0.0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
UpperCamelCase = 1
elif isinstance(UpperCamelCase__ , torch.Tensor ):
UpperCamelCase = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCamelCase__ )}""" )
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
UpperCamelCase = preprocess(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCamelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCamelCase = next(self.unet.parameters() ).dtype
UpperCamelCase = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ )
UpperCamelCase = image.to(device=self.device , dtype=UpperCamelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device )
UpperCamelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase = {}
if accepts_eta:
UpperCamelCase = eta
for t in self.progress_bar(UpperCamelCase__ ):
# concat latents and low resolution image in the channel dimension.
UpperCamelCase = torch.cat([latents, image] , dim=1 )
UpperCamelCase = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
UpperCamelCase = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# decode the image latents with the VQVAE
UpperCamelCase = self.vqvae.decode(UpperCamelCase__ ).sample
UpperCamelCase = torch.clamp(UpperCamelCase__ , -1.0 , 1.0 )
UpperCamelCase = image / 2 + 0.5
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
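# Usage sketch for the pipeline above (the checkpoint name is an assumption —
# any LDM super-resolution checkpoint with a VQModel + UNet2DModel pair should work):
#
#   from diffusers import LDMSuperResolutionPipeline
#   pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipeline(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]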
| 249 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_UpperCamelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Move the timm backbone weights under the HF `conv_encoder` prefix."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused in_proj matrix into separate query, key and value projections."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias"
        )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
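# Sanity-check sketch (an illustration added here, not part of the original conversion
# script): with hidden size 256, PyTorch's fused `in_proj_weight` has shape
# (3 * 256, 256), and slicing its rows in thirds recovers query, key and value in that
# order — exactly what the slices above do.
_demo_fused = torch.randn(3 * 256, 256)
_demo_q, _demo_k, _demo_v = _demo_fused[:256], _demo_fused[256:512], _demo_fused[-256:]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (256, 256)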
def resize(image, checkpoint_url):
    """Resize so the longest side matches the size the checkpoint was trained with."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor and normalize with ImageNet statistics."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original checkpoint's weights into our Table Transformer structure."""
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
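# Example invocation (a sketch; the script filename and output path are hypothetical,
# the URL must be one of the two allowed choices above):
# python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection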
| 254 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Measure a single qubit and return the histogram of readout counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
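# Expected output: since no gate is applied before measurement, the qubit stays in |0>
# and all 1000 shots read out zero:
# Total count for various states are: {'0': 1000}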
| 254 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
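# Minimal usage sketch (an illustration; assumes the class above with 1 s of silent
# mono 24 kHz audio — the shape shown is specific to this toy input):
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
# batch = extractor([np.zeros(24000, dtype=np.float32)], sampling_rate=24000, return_tensors="np")
# batch["input_values"].shape  # -> (1, 1, 24000): (batch, channels, samples)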
| 352 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase__ : int = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
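# Behavior sketch (a descriptive note, not from the original comments):
# `find_executable_batch_size` calls `inner_training_loop(batch_size)` and, whenever a
# CUDA out-of-memory error is raised, frees memory, halves the batch size and retries —
# which is why the model, dataloaders and optimizer are all rebuilt inside the loop body.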
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
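# Example launch (a sketch; assumes this script is saved as memory.py):
# accelerate launch memory.py --mixed_precision fp16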
| 330 | 0 |
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit (0 for input 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
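# Examples (illustrative): 8 == 0b1000, so its highest set bit sits in position 4.
# get_highest_set_bit_position(8)  -> 4
# get_highest_set_bit_position(0)  -> 0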
| 255 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: try every length for the first piece and recurse."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized top-down dynamic programming."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative bottom-up dynamic programming."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
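# Worked example (an added illustration): with prices = [1, 5, 8, 9] the optimum for a
# rod of length 4 is two pieces of length 2, giving 5 + 5 = 10 and beating the uncut
# rod's price of 9:
# bottom_up_cut_rod(4, [1, 5, 8, 9])  -> 10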
| 255 | 1 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
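# Examples (illustrative):
# is_balanced("{[()]}")  -> True
# is_balanced("{[(])}")  -> False  (']' tries to close '(' out of order)
# is_balanced("((")      -> False  (unmatched opens remain on the stack)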
| 46 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 46 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
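# Minimal usage sketch (illustrative; the ids are what the hosted BART vocabulary is
# expected to produce and are shown for flavor rather than as a guarantee):
# tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
# tokenizer("Hello world")["input_ids"]  # -> [0, 31414, 232, 2]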
| 182 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the intensity of polarized light after an analyzer at the given angle."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
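# Worked example (an added illustration): cos(60°)² = 0.25, so an initial intensity of
# 100 is attenuated to roughly a quarter:
# malus_law(100, 60)  -> ~25.0 (up to floating-point error)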
| 321 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
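# Quick instantiation sketch (illustrative): shrinking the three encoder stacks.
# config = LxmertConfig(l_layers=3, x_layers=2, r_layers=2)
# config.num_hidden_layers  # -> {"vision": 2, "cross_encoder": 2, "language": 3}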
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
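# Example (illustrative): linear RoPE scaling that doubles the usable context window;
# this passes the validation above (known type, float factor > 1).
# config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})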
| 0 | 0 |