| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_["""modeling_bigbird_pegasus"""] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:  # pairs with the "if TYPE_CHECKING" above, not with the inner try/except
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], a_, module_spec=__spec__)
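# A minimal, self-contained sketch (an assumption, not the Transformers
# implementation) of the lazy-import pattern the module above relies on:
# the real module is only imported on first attribute access.
import importlib
import types

class LazySketchModule(types.ModuleType):
    def __init__(self, name):
        super().__init__(name)
        self._real = None  # filled in on first attribute access

    def __getattr__(self, attr):
        if self._real is None:
            self._real = importlib.import_module(self.__name__)
        return getattr(self._real, attr)

lazy_json = LazySketchModule("json")    # nothing imported yet
print(lazy_json.dumps({"lazy": True}))  # the import happens here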
| 622 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.float16 )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
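# Hedged sketch of the save/reload determinism check performed above, using a
# plain torch module instead of a diffusers pipeline so it is self-contained
# (names and tolerances are illustrative assumptions):
import tempfile
import torch

net = torch.nn.Linear(4, 4)
torch.manual_seed(0)
out_before = net(torch.randn(2, 4))
with tempfile.TemporaryDirectory() as tmpdirname:
    torch.save(net.state_dict(), tmpdirname + "/weights.pt")
    reloaded = torch.nn.Linear(4, 4)
    reloaded.load_state_dict(torch.load(tmpdirname + "/weights.pt"))
torch.manual_seed(0)  # reseed so the random input matches exactly
out_after = reloaded(torch.randn(2, 4))
assert torch.abs(out_before - out_after).sum() < 1e-5, "Models don't have the same forward pass"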
| 622 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = data
__lowerCamelCase = None
def __str__( self ):
'''simple docstring'''
return F"""{self.data}"""
class __lowerCAmelCase ( Generic[T] ):
def __init__( self ):
'''simple docstring'''
__lowerCamelCase = None
def __iter__( self ):
'''simple docstring'''
__lowerCamelCase = self.top
while node:
yield node.data
__lowerCamelCase = node.next
def __str__( self ):
'''simple docstring'''
return "->".join([str(__UpperCAmelCase ) for item in self] )
def __len__( self ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return self.top is None
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = Node(__UpperCAmelCase )
if not self.is_empty():
__lowerCamelCase = self.top
__lowerCamelCase = node
def lowerCamelCase ( self ):
'''simple docstring'''
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , __UpperCAmelCase )
__lowerCamelCase = self.top
__lowerCamelCase = self.top.next
return pop_node.data
def lowerCamelCase ( self ):
'''simple docstring'''
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
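# For comparison, a hedged, self-contained sketch of the same LIFO interface
# built on a Python list; both give O(1) push/pop, the linked-list version
# above just makes the node structure explicit:
class ListStack:
    def __init__(self):
        self._items = []

    def push(self, item):
        self._items.append(item)

    def pop(self):
        if not self._items:
            raise IndexError("pop from empty stack")
        return self._items.pop()

    def peek(self):
        if not self._items:
            raise IndexError("peek from empty stack")
        return self._items[-1]

    def __len__(self):
        return len(self._items)

s = ListStack()
s.push(1)
s.push(2)
assert s.peek() == 2 and s.pop() == 2 and len(s) == 1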
| 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
__lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
__lowerCamelCase = str(_UpperCamelCase )
__lowerCamelCase = AutoModelForSeq2SeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCamelCase = model.half()
__lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
if prefix is None:
__lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
__lowerCamelCase = [prefix + text for text in examples_chunk]
__lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
__lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowerCamelCase = int(time.time() - start_time ) # seconds
__lowerCamelCase = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
parser.add_argument(
'''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
__lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowerCamelCase = generate_summaries_or_translations(
_UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fp16 ,task=args.task ,prefix=args.prefix ,**_UpperCamelCase ,)
if args.reference_path is None:
return {}
# Compute scores
__lowerCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
__lowerCamelCase = score_fn(_UpperCamelCase ,_UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
__lowerCamelCase = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase ,open(args.score_path ,'''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 622 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a__ ( ):
__lowerCamelCase = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
__lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ).convert('''RGB''' )
return image
def a__ ( _UpperCamelCase : Any ):
__lowerCamelCase = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
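# Hedged toy illustration of how a (src, dest) rename table like the one built
# above is applied to a checkpoint dict (the rename_key helper defined next
# does exactly this pop-and-reinsert):
toy_state = {"visual_encoder.cls_token": 0, "ln_vision.weight": 1}
toy_renames = [
    ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
    ("ln_vision.weight", "vision_model.post_layernorm.weight"),
]
for src, dest in toy_renames:
    toy_state[dest] = toy_state.pop(src)
assert "vision_model.post_layernorm.weight" in toy_state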
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : str ,_UpperCamelCase : str ):
__lowerCamelCase = dct.pop(_UpperCamelCase )
__lowerCamelCase = val
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : str ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__lowerCamelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
__lowerCamelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__lowerCamelCase = torch.cat((q_bias, torch.zeros_like(_UpperCamelCase ,requires_grad=_UpperCamelCase ), v_bias) )
__lowerCamelCase = qkv_bias
def a__ ( _UpperCamelCase : List[str] ):
__lowerCamelCase = 3_64 if '''coco''' in model_name else 2_24
__lowerCamelCase = InstructBlipVisionConfig(image_size=_UpperCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__lowerCamelCase = T5Config.from_pretrained('''google/flan-t5-xl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__lowerCamelCase = T5Config.from_pretrained('''google/flan-t5-xxl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__lowerCamelCase = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' ,vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
__lowerCamelCase = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' ,vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__lowerCamelCase = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
__lowerCamelCase = InstructBlipConfig(vision_config=_UpperCamelCase ,text_config=_UpperCamelCase ,qformer_config=_UpperCamelCase )
return config, image_size
@torch.no_grad()
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Union[str, Any]=False ):
__lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' ,truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
__lowerCamelCase = T5TokenizerFast.from_pretrained('''google/flan-t5-xl''' ,truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__lowerCamelCase = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' ,truncation_side='''left''' ,bos_token='''</s>''' ,unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
__lowerCamelCase ,__lowerCamelCase = get_blipa_config(_UpperCamelCase )
__lowerCamelCase = InstructBlipForConditionalGeneration(_UpperCamelCase ).eval()
__lowerCamelCase = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
__lowerCamelCase ,__lowerCamelCase = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__lowerCamelCase = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
__lowerCamelCase = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = load_model_and_preprocess(
name=_UpperCamelCase ,model_type=_UpperCamelCase ,is_eval=_UpperCamelCase ,device=_UpperCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__lowerCamelCase = original_model.state_dict()
__lowerCamelCase = create_rename_keys(_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__lowerCamelCase = state_dict.pop(_UpperCamelCase )
if key.startswith('''Qformer.bert''' ):
__lowerCamelCase = key.replace('''Qformer.bert''' ,'''qformer''' )
if "attention.self" in key:
__lowerCamelCase = key.replace('''self''' ,'''attention''' )
if "llm_proj" in key:
__lowerCamelCase = key.replace('''llm_proj''' ,'''language_projection''' )
if "t5_proj" in key:
__lowerCamelCase = key.replace('''t5_proj''' ,'''language_projection''' )
if key.startswith('''llm_model''' ):
__lowerCamelCase = key.replace('''llm_model''' ,'''language_model''' )
if key.startswith('''t5''' ):
__lowerCamelCase = key.replace('''t5''' ,'''language''' )
__lowerCamelCase = val
# read in qv biases
read_in_q_v_bias(_UpperCamelCase ,_UpperCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase )
__lowerCamelCase = load_demo_image()
__lowerCamelCase = '''What is unusual about this image?'''
# create processor
__lowerCamelCase = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} ,image_mean=_UpperCamelCase ,image_std=_UpperCamelCase )
__lowerCamelCase = InstructBlipProcessor(
image_processor=_UpperCamelCase ,tokenizer=_UpperCamelCase ,qformer_tokenizer=_UpperCamelCase ,)
__lowerCamelCase = processor(images=_UpperCamelCase ,text=_UpperCamelCase ,return_tensors='''pt''' ).to(_UpperCamelCase )
# make sure processor creates exact same pixel values
__lowerCamelCase = vis_processors['''eval'''](_UpperCamelCase ).unsqueeze(0 ).to(_UpperCamelCase )
__lowerCamelCase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,_UpperCamelCase )
original_model.to(_UpperCamelCase )
hf_model.to(_UpperCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
__lowerCamelCase = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
__lowerCamelCase = hf_model(**_UpperCamelCase ).logits
else:
__lowerCamelCase = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
__lowerCamelCase = tokenizer('''\n''' ,return_tensors='''pt''' ).input_ids.to(_UpperCamelCase )
__lowerCamelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-1_00 )
__lowerCamelCase = hf_model(**_UpperCamelCase ,labels=_UpperCamelCase ).logits
print('''First values of original logits:''' ,original_logits[0, :3, :3] )
print('''First values of HF logits:''' ,logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__lowerCamelCase = 1e-4 if '''vicuna''' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) ,_UpperCamelCase ,atol=_UpperCamelCase )
print('''Looks ok!''' )
print('''Generating with original model...''' )
__lowerCamelCase = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} ,num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
__lowerCamelCase = hf_model.generate(
**_UpperCamelCase ,do_sample=_UpperCamelCase ,num_beams=5 ,max_length=2_56 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__lowerCamelCase = 2
print('''Original generation:''' ,_UpperCamelCase )
__lowerCamelCase = processor.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase )
__lowerCamelCase = [text.strip() for text in output_text]
print('''HF generation:''' ,_UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_UpperCamelCase )
hf_model.save_pretrained(_UpperCamelCase )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
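# Hedged sketch of the validation pattern the conversion script uses: run both
# models on identical inputs and require near-identical logits before trusting
# the converted checkpoint (toy tensors stand in for real models here):
import torch

ref_logits = torch.randn(1, 3, 8)
converted_logits = ref_logits + 1e-6 * torch.randn(1, 3, 8)  # simulated conversion noise
assert ref_logits.shape == converted_logits.shape
atol = 1e-4  # the script loosens the tolerance for the fp16-style vicuna variants
assert torch.allclose(ref_logits, converted_logits, atol=atol)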
| 622 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : Any=None ):
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(_UpperCamelCase ,config.pad_token_id ) ,tf.int8 )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = embed_dim
__lowerCamelCase = word_embed_proj_dim
__lowerCamelCase = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCAmelCase , **self.config_updates , )
__lowerCamelCase = prepare_opt_inputs_dict(__UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel(config=__UpperCAmelCase )
__lowerCamelCase = inputs_dict['''input_ids''']
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention_mask
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
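# Hedged numpy sketch of the cache-consistency property tested above: decoding
# the last tokens with past_key_values must reproduce a slice of the full
# forward pass (toy arrays stand in for model outputs):
import numpy as np

rng = np.random.default_rng(0)
output_from_no_past = rng.normal(size=(2, 9, 16))   # logits for the full sequence
output_from_past = output_from_no_past[:, -3:, :]   # what an exact cache yields
idx = int(rng.integers(output_from_no_past.shape[-1]))  # compare one random feature
np.testing.assert_allclose(
    output_from_no_past[:, -3:, idx], output_from_past[:, :, idx], rtol=1e-3
)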
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
# Build the word embedding weights if they don't exist yet,
# then retry fetching the attribute once built.
model.build()
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCamelCase = model_class(config=__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
__lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
__lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
return tf.constant(_UpperCamelCase ,dtype=tf.int32 )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.ones((4, 1) , dtype=tf.int32 ) * 2
__lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCamelCase = input_ids.shape[0]
__lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowerCamelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCamelCase = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCamelCase = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
__lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-3 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = '''facebook/opt-350m'''
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCamelCase = GPT2Tokenizer.from_pretrained(self.path_model )
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCamelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-125m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPT2Tokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = GPT2Tokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = '''left'''
# use different length sentences to test batching
__lowerCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
__lowerCamelCase = inputs['''input_ids''']
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] )
__lowerCamelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase )
__lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowerCamelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPT2Tokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
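# Hedged sketch of why padding_side="left" matters in the batching test above:
# decoder-only models continue from the final position, so padding must go on
# the left or generation would continue from pad tokens:
PAD = 0
batch = [[5, 6, 7, 8], [9, 10]]
max_len = max(len(seq) for seq in batch)
left_padded = [[PAD] * (max_len - len(seq)) + seq for seq in batch]
attention_mask = [[int(tok != PAD) for tok in seq] for seq in left_padded]
assert left_padded[1] == [0, 0, 9, 10]      # real tokens end at the last position
assert attention_mask[1] == [0, 0, 1, 1]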
| 622 | 1 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
a_ = None
try:
import msvcrt
except ImportError:
a_ = None
try:
import fcntl
except ImportError:
a_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a_ = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
a_ = """3.0.12"""
_logger = None
def a__ ( ):
global _logger
_logger = _logger or logging.getLogger(__name__ )
return _logger
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = lock_file
return None
def __str__( self ):
'''simple docstring'''
__lowerCamelCase = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = lock
return None
def __enter__( self ):
'''simple docstring'''
return self.lock
def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
self.lock.release()
return None
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowerCamelCase = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase )
# The path to the lock file.
__lowerCamelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowerCamelCase = None
# The default timeout value.
__lowerCamelCase = timeout
# We use this lock primarily for the lock counter.
__lowerCamelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowerCamelCase = 0
return None
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._lock_file
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = float(__UpperCAmelCase )
return None
def lowerCamelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
def lowerCamelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._lock_file_fd is not None
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ):
'''simple docstring'''
# Use the default timeout, if no timeout is provided.
if timeout is None:
__lowerCamelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowerCamelCase = id(self )
__lowerCamelCase = self._lock_file
__lowerCamelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(__UpperCAmelCase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowerCamelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCamelCase ( self , __UpperCAmelCase=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowerCamelCase = id(self )
__lowerCamelCase = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__lowerCamelCase = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
self.release()
return None
def __del__( self ):
'''simple docstring'''
self.release(force=__UpperCAmelCase )
return None
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = os.path.basename(__UpperCAmelCase )
if len(__UpperCAmelCase ) > max_length and max_length > 0:
__lowerCamelCase = os.path.dirname(__UpperCAmelCase )
__lowerCamelCase = str(hash(__UpperCAmelCase ) )
__lowerCamelCase = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(__UpperCAmelCase , __UpperCAmelCase )
else:
return path
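# Hedged standalone illustration of the shortening rule above: keep a prefix of
# the name, splice in a hash, and append ".lock" so the result respects the
# filesystem's filename limit:
import os

def shorten_sketch(path, max_length=255):
    name = os.path.basename(path)
    if len(name) > max_length > 0:
        digest = str(hash(name))
        name = name[: max_length - len(digest) - 8] + "..." + digest + ".lock"
    return os.path.join(os.path.dirname(path), name)

assert len(os.path.basename(shorten_sketch("/tmp/" + "x" * 300))) <= 255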
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase )
__lowerCamelCase = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase )
except OSError:
pass
else:
try:
msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__UpperCAmelCase )
else:
__lowerCamelCase = fd
return None
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self._lock_file_fd
__lowerCamelCase = None
msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 )
os.close(__UpperCAmelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax
super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase )
try:
fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__UpperCAmelCase )
else:
__lowerCamelCase = fd
return None
def lowerCamelCase ( self ):
'''simple docstring'''
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__lowerCamelCase = self._lock_file_fd
__lowerCamelCase = None
fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN )
os.close(__UpperCAmelCase )
return None
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowerCamelCase = os.open(self._lock_file , __UpperCAmelCase )
except OSError:
pass
else:
__lowerCamelCase = fd
return None
def lowerCamelCase ( self ):
'''simple docstring'''
os.close(self._lock_file_fd )
__lowerCamelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
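# Hedged usage sketch for the classes above (FileLock is the platform alias
# chosen just before this comment):
# with FileLock("resource.txt.lock", timeout=5):  # raises Timeout after 5 s
#     ...                                         # exclusive critical section
# A self-contained POSIX-only equivalent of the Unix lock's acquire/release steps:
import fcntl
import os

fd = os.open("demo.lock", os.O_RDWR | os.O_CREAT | os.O_TRUNC)
try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)  # OSError if already held
finally:
    fcntl.flock(fd, fcntl.LOCK_UN)
    os.close(fd)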
| 622 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ):
__lowerCamelCase = np.argmax(_UpperCamelCase ,axis=1 )
return np.sum(outputs == labels )
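# Quick toy check of the argmax-accuracy helper defined just above:
toy_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
toy_labels = np.array([1, 0, 0])
assert np.sum(np.argmax(toy_logits, axis=1) == toy_labels) == 2  # 2 of 3 correct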
def a__ ( _UpperCamelCase : Optional[int] ):
with open(_UpperCamelCase ,encoding='''utf_8''' ) as f:
__lowerCamelCase = csv.reader(_UpperCamelCase )
__lowerCamelCase = []
next(_UpperCamelCase ) # skip the first line
for line in tqdm(_UpperCamelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Dict ):
__lowerCamelCase = []
for dataset in encoded_datasets:
__lowerCamelCase = len(_UpperCamelCase )
__lowerCamelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.int64 )
__lowerCamelCase = np.zeros((n_batch, 2) ,dtype=np.int64 )
__lowerCamelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.int64 )
__lowerCamelCase = np.zeros((n_batch,) ,dtype=np.int64 )
for i, (story, conta, contb, mc_label) in enumerate(_UpperCamelCase ):
__lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
__lowerCamelCase = with_conta
__lowerCamelCase = with_conta
__lowerCamelCase = len(_UpperCamelCase ) - 1
__lowerCamelCase = len(_UpperCamelCase ) - 1
__lowerCamelCase = with_conta
__lowerCamelCase = with_conta
__lowerCamelCase = mc_label
__lowerCamelCase = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' ,type=_UpperCamelCase ,default='''openai-gpt''' ,help='''pretrained model name''' )
parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
parser.add_argument('''--train_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--eval_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 )
parser.add_argument('''--num_train_epochs''' ,type=_UpperCamelCase ,default=3 )
parser.add_argument('''--train_batch_size''' ,type=_UpperCamelCase ,default=8 )
parser.add_argument('''--eval_batch_size''' ,type=_UpperCamelCase ,default=16 )
parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=_UpperCamelCase ,help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' ,type=_UpperCamelCase ,default=1 )
parser.add_argument(
'''--max_steps''' ,default=-1 ,type=_UpperCamelCase ,help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) ,)
parser.add_argument(
'''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,)
parser.add_argument('''--learning_rate''' ,type=_UpperCamelCase ,default=6.25e-5 )
parser.add_argument('''--warmup_steps''' ,default=0 ,type=_UpperCamelCase ,help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' ,type=_UpperCamelCase ,default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' ,type=_UpperCamelCase ,default=0.01 )
parser.add_argument('''--lm_coef''' ,type=_UpperCamelCase ,default=0.9 )
parser.add_argument('''--n_valid''' ,type=_UpperCamelCase ,default=3_74 )
parser.add_argument('''--server_ip''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' )
__lowerCamelCase = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowerCamelCase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase ,_UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowerCamelCase = ['''_start_''', '''_delimiter_''', '''_classify_''']
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCamelCase )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCamelCase ) )
model.to(_UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCamelCase : Dict ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) )
elif isinstance(_UpperCamelCase ,_UpperCamelCase ):
return obj
return [tokenize_and_encode(_UpperCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__lowerCamelCase = load_rocstories_dataset(args.train_dataset )
__lowerCamelCase = load_rocstories_dataset(args.eval_dataset )
__lowerCamelCase = (train_dataset, eval_dataset)
__lowerCamelCase = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
__lowerCamelCase = model.config.n_positions // 2 - 2
__lowerCamelCase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(contb[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, contb, _ in dataset )
__lowerCamelCase = min(_UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowerCamelCase = pre_process_datasets(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,*_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = tensor_datasets[0], tensor_datasets[1]
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = RandomSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.train_batch_size )
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = SequentialSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowerCamelCase = args.max_steps
__lowerCamelCase = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
__lowerCamelCase = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowerCamelCase = list(model.named_parameters() )
__lowerCamelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__lowerCamelCase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__lowerCamelCase = AdamW(_UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon )
__lowerCamelCase = get_linear_schedule_with_warmup(
_UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=_UpperCamelCase )
if args.do_train:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ):
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = tqdm(_UpperCamelCase ,desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
__lowerCamelCase = model(_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowerCamelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
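            # i.e. an exponential moving average of the loss: 0.7 * previous average + 0.3 * newest batch loss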
nb_tr_steps += 1
__lowerCamelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase ,scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowerCamelCase = model.module if hasattr(_UpperCamelCase ,'''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
torch.save(model_to_save.state_dict() ,_UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
__lowerCamelCase ,__lowerCamelCase = 0, 0
__lowerCamelCase ,__lowerCamelCase = 0, 0
for batch in tqdm(_UpperCamelCase ,desc='''Evaluating''' ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
with torch.no_grad():
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model(
_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = mc_logits.detach().cpu().numpy()
__lowerCamelCase = mc_labels.to('''cpu''' ).numpy()
__lowerCamelCase = accuracy(_UpperCamelCase ,_UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowerCamelCase = eval_loss / nb_eval_steps
__lowerCamelCase = eval_accuracy / nb_eval_examples
__lowerCamelCase = tr_loss / nb_tr_steps if args.do_train else None
__lowerCamelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__lowerCamelCase = os.path.join(args.output_dir ,'''eval_results.txt''' )
with open(_UpperCamelCase ,'''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' ,_UpperCamelCase ,str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
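# --- Added illustration (not part of the original script) ---
# A minimal, de-obfuscated sketch of the "add special tokens, then resize the
# embedding matrix" pattern used above. Only the standard `openai-gpt` checkpoint
# name is assumed; everything else mirrors the calls in the script.
from transformers import OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
tokenizer.add_tokens(["_start_", "_delimiter_", "_classify_"])
model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-gpt")
model.resize_token_embeddings(len(tokenizer))  # grow the embedding matrix to cover the new token ids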
| 622 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a__ ( _UpperCamelCase : Any ):
__lowerCamelCase = args.pruning_method
__lowerCamelCase = args.threshold
__lowerCamelCase = args.model_name_or_path.rstrip('''/''' )
__lowerCamelCase = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
__lowerCamelCase = torch.load(os.path.join(_UpperCamelCase ,'''pytorch_model.bin''' ) )
__lowerCamelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__lowerCamelCase = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
__lowerCamelCase = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
__lowerCamelCase = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
__lowerCamelCase = MagnitudeBinarizer.apply(inputs=_UpperCamelCase ,threshold=_UpperCamelCase )
__lowerCamelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__lowerCamelCase = name[:-6]
__lowerCamelCase = model[F"""{prefix_}mask_scores"""]
__lowerCamelCase = TopKBinarizer.apply(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__lowerCamelCase = name[:-6]
__lowerCamelCase = model[F"""{prefix_}mask_scores"""]
__lowerCamelCase = ThresholdBinarizer.apply(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__lowerCamelCase = name[:-6]
__lowerCamelCase = model[F"""{prefix_}mask_scores"""]
__lowerCamelCase ,__lowerCamelCase = -0.1, 1.1
__lowerCamelCase = torch.sigmoid(_UpperCamelCase )
__lowerCamelCase = s * (r - l) + l
__lowerCamelCase = s_bar.clamp(min=0.0 ,max=1.0 )
__lowerCamelCase = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
__lowerCamelCase = os.path.join(
os.path.dirname(_UpperCamelCase ) ,F"""bertarized_{os.path.basename(_UpperCamelCase )}""" )
if not os.path.isdir(_UpperCamelCase ):
shutil.copytree(_UpperCamelCase ,_UpperCamelCase )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(_UpperCamelCase ,os.path.join(_UpperCamelCase ,'''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
a_ = parser.parse_args()
main(args)
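# --- Added illustration (not part of the original script) ---
# The binarizers above come from the external `emmental` research package. This is
# only a sketch of the top-K idea, assuming `threshold` is the fraction of weights
# to keep; it is not the emmental implementation.
import torch

def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    # Keep the `threshold` fraction of entries with the highest importance scores.
    k = max(1, round(threshold * scores.numel()))
    cutoff = torch.topk(scores.flatten(), k).values[-1]
    return (scores >= cutoff).to(scores.dtype)

weights, scores = torch.randn(4, 4), torch.randn(4, 4)
pruned = weights * topk_mask(scores, threshold=0.25)  # 4 of the 16 weights survive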
| 622 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 622 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
a_ = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
a_ = {
"""camembert-base""": 512,
}
a_ = """▁"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
        __lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , str ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
__lowerCamelCase = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
__lowerCamelCase = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
__lowerCamelCase = len(self.fairseq_tokens_to_ids )
__lowerCamelCase = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__UpperCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
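# --- Added illustration (not part of the original file) ---
# How build_inputs_with_special_tokens above composes sequences; the ids are
# hypothetical stand-ins for <s> and </s>.
cls, sep = [5], [6]
a, b = [10, 11], [12]
single = cls + a + sep                  # <s> A </s>
pair = cls + a + sep + sep + b + sep    # <s> A </s></s> B </s>  (note the doubled </s>)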
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """lxmert"""
lowerCAmelCase__ = {}
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=9500 , __UpperCAmelCase=1600 , __UpperCAmelCase=400 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=9 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=2048 , __UpperCAmelCase=4 , __UpperCAmelCase=6.67 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = num_qa_labels
__lowerCamelCase = num_object_labels
__lowerCamelCase = num_attr_labels
__lowerCamelCase = l_layers
__lowerCamelCase = x_layers
__lowerCamelCase = r_layers
__lowerCamelCase = visual_feat_dim
__lowerCamelCase = visual_pos_dim
__lowerCamelCase = visual_loss_normalizer
__lowerCamelCase = task_matched
__lowerCamelCase = task_mask_lm
__lowerCamelCase = task_obj_predict
__lowerCamelCase = task_qa
__lowerCamelCase = visual_obj_loss
__lowerCamelCase = visual_attr_loss
__lowerCamelCase = visual_feat_loss
__lowerCamelCase = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__UpperCAmelCase )
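# --- Added usage sketch (not part of the original file) ---
# `LxmertConfig` is the public name of the class above; the kwargs shown are its
# documented layer-count arguments.
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}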
| 622 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
a_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,)
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase )
if "pos_bias_u" in name:
__lowerCamelCase = None
elif "pos_bias_v" in name:
__lowerCamelCase = None
elif "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = '''weight'''
elif "running_mean" in name:
__lowerCamelCase = '''running_mean'''
elif "inv_freq" in name:
__lowerCamelCase = '''inv_freq'''
elif "running_var" in name:
__lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
__lowerCamelCase = '''num_batches_tracked'''
else:
__lowerCamelCase = None
set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if config_path is not None:
__lowerCamelCase = WavaVecaConformerConfig.from_pretrained(_UpperCamelCase ,hidden_act='''swish''' )
else:
__lowerCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__lowerCamelCase = '''rotary'''
if is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(_UpperCamelCase ,'''vocab.json''' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase )
__lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase = 0
__lowerCamelCase = 1
with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = WavaVecaCTCTokenizer(
_UpperCamelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=_UpperCamelCase ,)
__lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,)
__lowerCamelCase = WavaVecaProcessor(feature_extractor=_UpperCamelCase ,tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
__lowerCamelCase = WavaVecaConformerForCTC(_UpperCamelCase )
else:
__lowerCamelCase = WavaVecaConformerForPreTraining(_UpperCamelCase )
if is_finetuned:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__lowerCamelCase = argparse.Namespace(task='''audio_pretraining''' )
__lowerCamelCase = fairseq.tasks.setup_task(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=_UpperCamelCase )
__lowerCamelCase = model[0].eval()
recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ,not is_finetuned )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
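# --- Added illustration (not part of the original script) ---
# Hypothetical distillation of the key-renaming loop above: map a fairseq
# parameter name onto its HF counterpart, expanding the '*' layer wildcard.
def rename(name: str, mapping: dict) -> str:
    for key, mapped in mapping.items():
        if key in name:
            if "*" in mapped:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped = mapped.replace("*", layer_index)
            return mapped
    return name

print(rename("encoder.layers.3.ffn1.w_1.weight", {"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense"}))
# -> encoder.layers.3.ffn1.intermediate_dense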
| 622 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Right- or left-pad each sequence to ``sequence_length`` with ``padding_value``."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    # we treat all non-letter/number ASCII as punctuation
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
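# --- Added usage example (not part of the original file) ---
# Right-padding two hypothetical label rows to length 4 with the -100 ignore index:
assert padding_tensor([[7, 8], [9]], -100, "right", 4) == [
    [7, 8, -100, -100],
    [9, -100, -100, -100],
]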
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors would fail here because the labels are not yet of equal length
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 622 | 1 |
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 622 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = 2
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowerCamelCase = n_head
# Used in the tests to check the size of the first hidden state
__lowerCamelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowerCamelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowerCamelCase = self.num_hidden_layers + 2
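        # Worked example with this tester's defaults: block_sizes=[1, 1, 2] and
        # num_decoder_layers=1 give num_hidden_layers = 4 + 1 = 5, so the full model
        # reports 5 + 2 = 7 hidden-states entries (input embeddings + upsampled sum).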
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForPreTraining(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
| 622 | 1 |
a_ = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 622 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 622 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
a_ = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class __lowerCAmelCase ( tr.AbstractTransform ):
def __init__( self , __UpperCAmelCase = " " ):
'''simple docstring'''
__lowerCamelCase = sentence_delimiter
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return list(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
for sent_idx, sentence in enumerate(__UpperCAmelCase ):
chars.extend(self.process_string(__UpperCAmelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCAmelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
a_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
a_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
a_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
a_ = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
a_ = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
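# --- Added worked example (not part of the original metric) ---
# The formula above by hand: prediction "cut" vs. reference "cat" gives one
# substitution and two hits, so CER = (1 + 0 + 0) / (1 + 0 + 2) = 1/3.
S, D, I, C = 1, 0, 0, 2
print((S + D + I) / (S + D + C))  # 0.3333...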
| 622 |
def split(string: str, separator: str = " ") -> list:
    """
    Split ``string`` on every occurrence of ``separator``.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 622 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 622 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
            len(__UpperCAmelCase ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase )  # check that the doc-token-related keys are present in the dictionary.
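The retrieve() contract these tests exercise, distilled into one hedged helper (shapes follow the assertions above; the retriever is any RagRetriever built as in the fixtures):

import numpy as np

def retrieve_top_docs(retriever, vector_size, n_docs=1):
    # two query vectors chosen so doc 1 maximizes the first inner product
    # and doc 0 the second, mirroring the dummy dataset above
    question_hidden_states = np.array(
        [np.ones(vector_size), -np.ones(vector_size)], dtype=np.float32
    )
    doc_ids, doc_dicts, doc_embeds = retriever.retrieve(question_hidden_states, n_docs=n_docs)
    assert doc_embeds.shape == (2, n_docs, vector_size)
    return doc_ids, doc_dicts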
| 622 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __lowerCAmelCase :
lowerCAmelCase__ = 42
# setable values
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
@classmethod
def lowerCamelCase ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return cls(common=__UpperCAmelCase , init_noise_sigma=__UpperCAmelCase , timesteps=__UpperCAmelCase )
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase__ = 42
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , __UpperCAmelCase = 1000 , __UpperCAmelCase = 0.0_001 , __UpperCAmelCase = 0.02 , __UpperCAmelCase = "linear" , __UpperCAmelCase = None , __UpperCAmelCase = "fixed_small" , __UpperCAmelCase = True , __UpperCAmelCase = "epsilon" , __UpperCAmelCase = jnp.floataa , ):
'''simple docstring'''
__lowerCamelCase = dtype
def lowerCamelCase ( self , __UpperCAmelCase = None ):
'''simple docstring'''
if common is None:
__lowerCamelCase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__lowerCamelCase = jnp.array(1.0 , dtype=self.dtype )
__lowerCamelCase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCAmelCase , init_noise_sigma=__UpperCAmelCase , timesteps=__UpperCAmelCase , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
return sample
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = () ):
'''simple docstring'''
__lowerCamelCase = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by the ratio,
        # rounding to avoid issues when num_inference_steps is a power of 3
__lowerCamelCase = (jnp.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCAmelCase , timesteps=__UpperCAmelCase , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = state.common.alphas_cumprod[t]
__lowerCamelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowerCamelCase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowerCamelCase = self.config.variance_type
        # hacks: these were probably added for training stability
if variance_type == "fixed_small":
__lowerCamelCase = jnp.clip(__UpperCAmelCase , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowerCamelCase = jnp.log(jnp.clip(__UpperCAmelCase , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
__lowerCamelCase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowerCamelCase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowerCamelCase = variance
__lowerCamelCase = state.common.betas[t]
__lowerCamelCase = (predicted_variance + 1) / 2
__lowerCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True , ):
'''simple docstring'''
__lowerCamelCase = timestep
if key is None:
__lowerCamelCase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowerCamelCase ,__lowerCamelCase = jnp.split(__UpperCAmelCase , sample.shape[1] , axis=1 )
else:
__lowerCamelCase = None
# 1. compute alphas, betas
__lowerCamelCase = state.common.alphas_cumprod[t]
__lowerCamelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__lowerCamelCase = 1 - alpha_prod_t
__lowerCamelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowerCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowerCamelCase = model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowerCamelCase = jnp.clip(__UpperCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowerCamelCase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowerCamelCase = jax.random.split(__UpperCAmelCase , num=1 )
__lowerCamelCase = jax.random.normal(__UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCAmelCase , __UpperCAmelCase , predicted_variance=__UpperCAmelCase ) ** 0.5) * noise
__lowerCamelCase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__lowerCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCAmelCase , state=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
return add_noise_common(state.common , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
return get_velocity_common(state.common , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
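A minimal sketch of driving this Flax DDPM scheduler through a full denoising loop, using the create_state / set_timesteps / step API defined above; `model_fn` is a hypothetical stand-in for a trained noise-prediction network:

import jax
from diffusers import FlaxDDPMScheduler

def sample_loop(model_fn, key, shape=(1, 3, 32, 32), num_inference_steps=50):
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps, shape)
    key, noise_key = jax.random.split(key)
    sample = jax.random.normal(noise_key, shape) * state.init_noise_sigma
    for t in state.timesteps:
        key, step_key = jax.random.split(key)
        model_output = model_fn(sample, t)
        # step() returns (prev_sample, state) when return_dict=False
        sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)
    return sample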
| 622 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 2E-3
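A quick instantiation sketch for the config above; PoolFormerModel is the matching transformers model class, and the defaults appear to mirror the poolformer_s12 checkpoint:

from transformers import PoolFormerConfig, PoolFormerModel

config = PoolFormerConfig()        # hidden_sizes [64, 128, 320, 512], depths [2, 2, 6, 2]
model = PoolFormerModel(config)
print(config.num_encoder_blocks)   # 4 stages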
| 622 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
a_ = logging.get_logger(__name__)
class __lowerCAmelCase :
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
@staticmethod
def lowerCamelCase ( ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase ( self ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def lowerCamelCase ( cls ):
'''simple docstring'''
return F"""`pip install {cls.pip_package or cls.name}`"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """optuna"""
@staticmethod
def lowerCamelCase ( ):
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return run_hp_search_optuna(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return default_hp_space_optuna(__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """ray"""
lowerCAmelCase__ = """'ray[tune]'"""
@staticmethod
def lowerCamelCase ( ):
'''simple docstring'''
return is_ray_available()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return run_hp_search_ray(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return default_hp_space_ray(__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """sigopt"""
@staticmethod
def lowerCamelCase ( ):
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return run_hp_search_sigopt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return default_hp_space_sigopt(__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """wandb"""
@staticmethod
def lowerCamelCase ( ):
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return run_hp_search_wandb(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return default_hp_space_wandb(__UpperCAmelCase )
a_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a__ ( ):
__lowerCamelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
F"""{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
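A sketch of how these backends are typically consumed through Trainer.hyperparameter_search; the trainer and the one-parameter search space are assumptions, and at least one backend (here optuna) must be installed:

def run_search(trainer, n_trials=10):
    def hp_space(trial):
        # hypothetical single-parameter space
        return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}

    return trainer.hyperparameter_search(
        direction="minimize", backend="optuna", hp_space=hp_space, n_trials=n_trials
    )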
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
    ),
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """visual_bert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
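A minimal instantiation sketch for the config above; the defaults correspond to the VQA checkpoints, and visual_embedding_dim sets the width of the externally computed visual features fed alongside the text tokens:

from transformers import VisualBertConfig, VisualBertModel

config = VisualBertConfig(visual_embedding_dim=512)
model = VisualBertModel(config)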
| 622 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
a_ = logging.getLogger(__name__)
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase=-1 ):
'''simple docstring'''
        # in NER datasets, the last column is usually reserved for the NER label
__lowerCamelCase = label_idx
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = mode.value
__lowerCamelCase = os.path.join(__UpperCAmelCase , F"""{mode}.txt""" )
__lowerCamelCase = 1
__lowerCamelCase = []
with open(__UpperCAmelCase , encoding='''utf-8''' ) as f:
__lowerCamelCase = []
__lowerCamelCase = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
guid_index += 1
__lowerCamelCase = []
__lowerCamelCase = []
else:
__lowerCamelCase = line.split(''' ''' )
words.append(splits[0] )
if len(__UpperCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
return examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(__UpperCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__lowerCamelCase = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(__UpperCAmelCase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , '''r''' ) as f:
__lowerCamelCase = f.read().splitlines()
if "O" not in labels:
__lowerCamelCase = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self ):
'''simple docstring'''
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , '''r''' ) as f:
__lowerCamelCase = f.read().splitlines()
if "O" not in labels:
__lowerCamelCase = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = mode.value
__lowerCamelCase = os.path.join(__UpperCAmelCase , F"""{mode}.txt""" )
__lowerCamelCase = 1
__lowerCamelCase = []
with open(__UpperCAmelCase , encoding='''utf-8''' ) as f:
for sentence in parse_incr(__UpperCAmelCase ):
__lowerCamelCase = []
__lowerCamelCase = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
guid_index += 1
return examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = 0
for sentence in parse_incr(__UpperCAmelCase ):
__lowerCamelCase = preds_list[example_id]
__lowerCamelCase = ''''''
for token in sentence:
out += F"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__UpperCAmelCase )
example_id += 1
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
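A usage sketch, assuming the three classes above carry their upstream names (NER, Chunk, POS) and the usual TokenClassificationTask method names; the data directory is hypothetical:

from utils_ner import Split

task = NER(label_idx=-1)                 # first class above, upstream name assumed
labels = task.get_labels(None)           # falls back to the built-in CoNLL-2003 label set
examples = task.read_examples_from_file("data/conll2003", Split.train)
print(len(examples), labels[:3])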
| 622 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """spiece.model"""}
a_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a_ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a_ = """▁"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
__lowerCamelCase = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
__lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
__lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.preprocess_text(__UpperCAmelCase )
__lowerCamelCase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
__lowerCamelCase = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__lowerCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
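A short round-trip sketch for the tokenizer above, using a released ALBERT checkpoint; exact pieces depend on the SentencePiece model:

from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tok("unwanted running")["input_ids"]
print(tok.convert_ids_to_tokens(ids))
# roughly ['[CLS]', '▁unwanted', '▁running', '[SEP]']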
| 622 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def a__ ( _UpperCamelCase : Union[str, Any] ):
if not is_accelerate_available():
return method
__lowerCamelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(_UpperCamelCase ) < version.parse('''0.17.0''' ):
return method
def wrapper(self : Dict ,*_UpperCamelCase : List[str] ,**_UpperCamelCase : Any ):
if hasattr(self ,'''_hf_hook''' ) and hasattr(self._hf_hook ,'''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self ,*_UpperCamelCase ,**_UpperCamelCase )
return wrapper
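A sketch of applying the wrapper above (named apply_forward_hook upstream in diffusers, obfuscated here as `a__`) to a module method, so that accelerate's pre-forward offload hook runs before the call when present:

import torch

class TinyEncoder(torch.nn.Module):
    @a__  # the decorator defined above
    def encode(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2

# without accelerate hooks attached, the wrapped method simply runs as-is
print(TinyEncoder().encode(torch.ones(2)))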
| 622 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ = """true"""
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[str]=82 ,_UpperCamelCase : Optional[Any]=16 ):
set_seed(42 )
__lowerCamelCase = RegressionModel()
__lowerCamelCase = deepcopy(_UpperCamelCase )
__lowerCamelCase = RegressionDataset(length=_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,batch_size=_UpperCamelCase )
model.to(accelerator.device )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return model, ddp_model, dataloader
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : str=False ):
__lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ,split='''validation''' )
def tokenize_function(_UpperCamelCase : int ):
__lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase )
return outputs
with accelerator.main_process_first():
__lowerCamelCase = dataset.map(
_UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
__lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(_UpperCamelCase : Any ):
if use_longest:
return tokenizer.pad(_UpperCamelCase ,padding='''longest''' ,return_tensors='''pt''' )
return tokenizer.pad(_UpperCamelCase ,padding='''max_length''' ,max_length=1_28 ,return_tensors='''pt''' )
return DataLoader(_UpperCamelCase ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=16 )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : List[str] ):
__lowerCamelCase = Accelerator(dispatch_batches=_UpperCamelCase ,split_batches=_UpperCamelCase )
__lowerCamelCase = get_dataloader(_UpperCamelCase ,not dispatch_batches )
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' ,return_dict=_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = []
for batch in dataloader:
__lowerCamelCase ,__lowerCamelCase = batch.values()
with torch.no_grad():
__lowerCamelCase = model(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__lowerCamelCase ,__lowerCamelCase = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCamelCase )
targs.append(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = torch.cat(_UpperCamelCase ), torch.cat(_UpperCamelCase )
return logits, targs
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : List[Any]=82 ,_UpperCamelCase : str=False ,_UpperCamelCase : List[str]=False ,_UpperCamelCase : Optional[int]=16 ):
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = get_basic_setup(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = generate_predictions(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
assert (
len(_UpperCamelCase ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCamelCase )}"""
def a__ ( _UpperCamelCase : bool = False ,_UpperCamelCase : bool = False ):
__lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' )
__lowerCamelCase ,__lowerCamelCase = get_mrpc_setup(_UpperCamelCase ,_UpperCamelCase )
# First do baseline
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''no''']
model.to(_UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCamelCase )
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCamelCase ,references=batch['''labels'''] )
__lowerCamelCase = metric.compute()
# Then do distributed
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase = batch['''labels''']
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCamelCase ,references=_UpperCamelCase )
__lowerCamelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def a__ ( ):
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_UpperCamelCase ,_UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__lowerCamelCase = Accelerator()
test_torch_metrics(_UpperCamelCase ,5_12 )
accelerator.state._reset_state()
def a__ ( _UpperCamelCase : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
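The core pattern this script exercises, in isolation: gather predictions and references across processes (dropping duplicated samples on the last batch) before updating a metric. A hedged helper, assuming an Accelerator, model, and metric built elsewhere:

import torch

def eval_step(accelerator, model, batch, metric):
    with torch.inference_mode():
        preds = model(**batch).logits.argmax(dim=-1)
    preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
    metric.add_batch(predictions=preds, references=refs)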
| 622 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = 13
__lowerCamelCase = 7
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = 99
__lowerCamelCase = 32
__lowerCamelCase = 2
__lowerCamelCase = 4
__lowerCamelCase = 37
__lowerCamelCase = '''gelu'''
__lowerCamelCase = 0.1
__lowerCamelCase = 0.1
__lowerCamelCase = 512
__lowerCamelCase = 16
__lowerCamelCase = 2
__lowerCamelCase = 0.02
__lowerCamelCase = 3
__lowerCamelCase = 4
__lowerCamelCase = None
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFRoFormerModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = TFRoFormerForCausalLM(config=__UpperCAmelCase )
__lowerCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFRoFormerForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFRoFormerForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFRoFormerForMultipleChoice(config=__UpperCAmelCase )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFRoFormerForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        (
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
        ) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFRoFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__lowerCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCamelCase = model(__UpperCAmelCase )[0]
# TODO Replace vocab size
__lowerCamelCase = 50000
__lowerCamelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , __UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__lowerCamelCase = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 1e-4
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.constant([[4, 10]] )
__lowerCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__lowerCamelCase = emba(input_ids.shape )
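        # The sinusoidal table puts sin(pos / 10000**(2i/dim)) in the first half
        # of each row and the matching cos in the second half, so row 0 is
        # three zeros followed by three ones.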
__lowerCamelCase = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
__lowerCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__lowerCamelCase = emba.weight[:3, :5]
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 1e-4
def lowerCamelCase ( self ):
'''simple docstring'''
        # query/key tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
__lowerCamelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__lowerCamelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__lowerCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__lowerCamelCase = embed_positions([2, 16, 768] )[None, None, :, :]
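        # Rotary embeddings rotate each (even, odd) feature pair by a
        # position-dependent angle; roughly (a sketch, not the exact TF code):
        # q_rot = q * cos + rotate_half(q) * sin, with sin/cos read from the
        # sinusoidal table computed above.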
__lowerCamelCase ,__lowerCamelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
__lowerCamelCase = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
| 622 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCamelCase = CLIPTextModel(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = CLIPTextModelWithProjection(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__lowerCamelCase = image / 2 + 0.5
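        # floats_tensor samples uniformly in [0, 1); the shift above maps the
        # dummy image into [0.5, 1.0) so it stays in a valid pixel range.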
if str(__UpperCAmelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = sd_pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
        __lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# forward without prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = negative_prompt
__lowerCamelCase = 3 * [inputs['''prompt''']]
__lowerCamelCase = sd_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
__lowerCamelCase = sd_pipe(
**__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_inputs(__UpperCAmelCase )
__lowerCamelCase = pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 622 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """gptj"""
lowerCAmelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = n_positions
__lowerCamelCase = n_embd
__lowerCamelCase = n_layer
__lowerCamelCase = n_head
__lowerCamelCase = n_inner
__lowerCamelCase = rotary_dim
__lowerCamelCase = activation_function
__lowerCamelCase = resid_pdrop
__lowerCamelCase = embd_pdrop
__lowerCamelCase = attn_pdrop
__lowerCamelCase = layer_norm_epsilon
__lowerCamelCase = initializer_range
__lowerCamelCase = use_cache
__lowerCamelCase = bos_token_id
__lowerCamelCase = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ):
# TODO: how to do that better?
__lowerCamelCase = 0
@property
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' )
__lowerCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._config.n_head
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ):
'''simple docstring'''
__lowerCamelCase = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
        # We need to order the inputs in the way they appear in the forward() signature
__lowerCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowerCamelCase ,__lowerCamelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowerCamelCase = seqlen + 2
__lowerCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
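                # Each layer's past key/value cache tensor has shape
                # (batch, num_heads, past_seq_len, head_dim); zeros are enough
                # for tracing the ONNX graph.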
__lowerCamelCase = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
__lowerCamelCase = common_inputs['''attention_mask''']
if self.use_past:
__lowerCamelCase = ordered_inputs['''attention_mask'''].dtype
__lowerCamelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 13
| 622 |
import torch
from diffusers import StableDiffusionPipeline
a_ = """path-to-your-trained-model"""
a_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
a_ = """A photo of sks dog in a bucket"""
a_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 622 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a_ = """sshleifer/mar_enro_6_3_student"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=__UpperCAmelCase , )
__lowerCamelCase = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def lowerCamelCase ( self ):
'''simple docstring'''
MarianMTModel.from_pretrained(__UpperCAmelCase )
@slow
@require_torch_gpu
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
__lowerCamelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
__lowerCamelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
__lowerCamelCase = bash_script.replace(__UpperCAmelCase , str(__UpperCAmelCase ) )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__lowerCamelCase = F"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__lowerCamelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(__UpperCAmelCase , '''argv''' , __UpperCAmelCase ):
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(__UpperCAmelCase )
__lowerCamelCase = SummarizationModule.add_model_specific_args(__UpperCAmelCase , os.getcwd() )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = main(__UpperCAmelCase )
# Check metrics
__lowerCamelCase = load_json(model.metrics_save_path )
__lowerCamelCase = metrics['''val'''][0]
__lowerCamelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __UpperCAmelCase )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__lowerCamelCase = os.listdir(__UpperCAmelCase )
__lowerCamelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
__lowerCamelCase = os.path.join(args.output_dir , __UpperCAmelCase )
__lowerCamelCase = torch.load(__UpperCAmelCase , map_location='''cpu''' )
__lowerCamelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            __lowerCamelCase = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class __lowerCAmelCase ( lowerCAmelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
__lowerCamelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
__lowerCamelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
__lowerCamelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
__lowerCamelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
__lowerCamelCase = bash_script.replace(__UpperCAmelCase , str(__UpperCAmelCase ) )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = bash_script.replace('''--fp16''' , '''''' )
__lowerCamelCase = 6
__lowerCamelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
F"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
F"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(__UpperCAmelCase , '''argv''' , __UpperCAmelCase ):
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(__UpperCAmelCase )
__lowerCamelCase = SummarizationDistiller.add_model_specific_args(__UpperCAmelCase , os.getcwd() )
__lowerCamelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__lowerCamelCase = distill_main(__UpperCAmelCase )
# Check metrics
__lowerCamelCase = load_json(model.metrics_save_path )
__lowerCamelCase = metrics['''val'''][0]
__lowerCamelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __UpperCAmelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
__lowerCamelCase = os.listdir(__UpperCAmelCase )
__lowerCamelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
__lowerCamelCase = os.path.join(args.output_dir , __UpperCAmelCase )
__lowerCamelCase = torch.load(__UpperCAmelCase , map_location='''cpu''' )
__lowerCamelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__lowerCamelCase = {os.path.basename(__UpperCAmelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 622 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __lowerCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
pass
def a__ ( _UpperCamelCase : List[str] ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
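        # apply_tesseract returns parallel lists of OCR'd words and normalized
        # bounding boxes; zipping them gives (word, box) pairs that can be fed
        # to the pipeline instead of re-running OCR on the image.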
__lowerCamelCase = '''What is the placebo?'''
__lowerCamelCase = [
{
'''image''': load_image(__UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = dqa_pipeline(__UpperCAmelCase , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''How many cats are there?'''
__lowerCamelCase = [
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
        # Tesseract detects no text in this image, so LayoutLMv2 has nothing
        # to ground an answer in and should return an empty result.
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
| 622 | 1 |
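# Project Euler 12: find the first triangular number with more than 500
# divisors. count_divisors relies on the prime-factorisation identity
# d(p1**a1 * ... * pk**ak) = (a1 + 1) * ... * (ak + 1).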
def a__ ( _UpperCamelCase : List[str] ):
__lowerCamelCase = 1
__lowerCamelCase = 2
while i * i <= n:
__lowerCamelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def a__ ( ):
__lowerCamelCase = 1
__lowerCamelCase = 1
while True:
i += 1
t_num += i
        if count_divisors(t_num ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
| 622 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''[PAD]'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__UpperCAmelCase ) , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
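        # The expected ids above are shifted by tokenizer.fairseq_offset to
        # leave room for the special tokens fairseq places at the front of
        # the vocabulary.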
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [35389, 6672, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 622 | 1 |
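# NAND gate over two binary inputs: the tuple trick yields 1 whenever at
# least one input is 0, so only NAND(1, 1) evaluates to 0.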
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ):
    return int((input_a, input_b).count(0 ) != 0 )
def a__ ( ):
assert nand_gate(0 ,0 ) == 1
assert nand_gate(0 ,1 ) == 1
assert nand_gate(1 ,0 ) == 1
assert nand_gate(1 ,1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 622 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
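# Composite or wrapper configurations (encoder-decoder, RAG, Musicgen, ...)
# have no single canonical checkpoint, so they are exempt from the check.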
def a__ ( _UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = None
# source code of `config_class`
__lowerCamelCase = inspect.getsource(_UpperCamelCase )
__lowerCamelCase = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowerCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase = ckpt_name
break
return checkpoint
def a__ ( ):
__lowerCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowerCamelCase = get_checkpoint_from_config_class(_UpperCamelCase )
__lowerCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 622 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
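    # Tolerance used when validating the exported ONNX model against the
    # PyTorch reference outputs.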
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 2E-3
| 622 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
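# Torch-dependent symbols are only registered when PyTorch is importable, so
# `import transformers` keeps working in torch-free environments.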
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = (DDPMScheduler,)
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__UpperCAmelCase )
return config
def lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , )
def lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
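        # With variance_type="fixed_small" the posterior variance is
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t); the asserts below
        # probe the start, middle and end of the 1000-step linear schedule.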
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
__lowerCamelCase = len(__UpperCAmelCase )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
__lowerCamelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCAmelCase ) ):
# 1. predict noise residual
__lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowerCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowerCamelCase = pred_prev_sample
__lowerCamelCase = torch.sum(torch.abs(__UpperCAmelCase ) )
__lowerCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
__lowerCamelCase = len(__UpperCAmelCase )
__lowerCamelCase = self.dummy_model()
__lowerCamelCase = self.dummy_sample_deter
__lowerCamelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCAmelCase ) ):
# 1. predict noise residual
__lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowerCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowerCamelCase = pred_prev_sample
__lowerCamelCase = torch.sum(torch.abs(__UpperCAmelCase ) )
__lowerCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
__lowerCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
__lowerCamelCase = scheduler.timesteps
for i, timestep in enumerate(__UpperCAmelCase ):
if i == len(__UpperCAmelCase ) - 1:
__lowerCamelCase = -1
else:
__lowerCamelCase = timesteps[i + 1]
__lowerCamelCase = scheduler.previous_timestep(__UpperCAmelCase )
__lowerCamelCase = prev_t.item()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
__lowerCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
__lowerCamelCase = [100, 87, 50, 1, 0]
__lowerCamelCase = len(__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__UpperCAmelCase , timesteps=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.scheduler_classes[0]
__lowerCamelCase = self.get_scheduler_config()
__lowerCamelCase = scheduler_class(**__UpperCAmelCase )
__lowerCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __UpperCAmelCase , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
| 622 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
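        # The expected output is the word-segmented form of the Chinese input
        # (rjieba-style segmentation), with tokens joined by single spaces.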
__lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
__lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
| 622 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase = "cpu" , __UpperCAmelCase = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__lowerCamelCase = device
__lowerCamelCase = CLIPTokenizerFast.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073]
__lowerCamelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711]
__lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__lowerCamelCase = torchvision.transforms.Resize(224 )
__lowerCamelCase = torchvision.transforms.CenterCrop(224 )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.resize(__UpperCAmelCase )
__lowerCamelCase = self.center_crop(__UpperCAmelCase )
__lowerCamelCase = self.normalize(__UpperCAmelCase )
return images
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer(text=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = self.preprocess_img(__UpperCAmelCase )
__lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = None
__lowerCamelCase = device if device else get_device()
if vqgan:
__lowerCamelCase = vqgan
else:
__lowerCamelCase = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase )
self.vqgan.eval()
if clip:
__lowerCamelCase = clip
else:
__lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__lowerCamelCase = ProcessorGradientFlow(device=self.device )
__lowerCamelCase = iterations
__lowerCamelCase = lr
__lowerCamelCase = log
__lowerCamelCase = make_grid
__lowerCamelCase = return_val
__lowerCamelCase = quantize
__lowerCamelCase = self.vqgan.decoder.z_shape
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=5 , __UpperCAmelCase=True ):
'''simple docstring'''
__lowerCamelCase = []
if output_path is None:
__lowerCamelCase = '''./animation.gif'''
if input_path is None:
__lowerCamelCase = self.save_path
__lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(__UpperCAmelCase ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(__UpperCAmelCase ) == 1:
            print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''' )
__lowerCamelCase = total_duration / len(__UpperCAmelCase )
__lowerCamelCase = [frame_duration] * len(__UpperCAmelCase )
if extend_frames:
__lowerCamelCase = 1.5
__lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(__UpperCAmelCase ) )
imageio.mimsave(__UpperCAmelCase , __UpperCAmelCase , duration=__UpperCAmelCase )
print(F"""gif saved to {output_path}""" )
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__lowerCamelCase = preprocess(Image.open(__UpperCAmelCase ) , target_image_size=256 ).to(self.device )
__lowerCamelCase = preprocess_vqgan(__UpperCAmelCase )
__lowerCamelCase ,*__lowerCamelCase = self.vqgan.encode(__UpperCAmelCase )
return z
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
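        # Decode the current latent shifted by the transform vector, optionally
        # quantizing the shifted latent through the VQGAN codebook first.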
__lowerCamelCase = self.latent.detach().requires_grad_()
__lowerCamelCase = base_latent + transform_vector
if self.quantize:
__lowerCamelCase ,*__lowerCamelCase = self.vqgan.quantize(__UpperCAmelCase )
else:
__lowerCamelCase = trans_latent
return self.vqgan.decode(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = self.clip_preprocessor(text=__UpperCAmelCase , images=__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase )
__lowerCamelCase = self.clip(**__UpperCAmelCase )
__lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
__lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
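        # Contrastive objective: -log(similarity to positive prompts)
        # + log(similarity to negative prompts, or 1 when none are given).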
__lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , __UpperCAmelCase , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , __UpperCAmelCase , weights=neg_prompts['''weights'''] )
else:
__lowerCamelCase = torch.tensor([1] , device=self.device )
__lowerCamelCase = -torch.log(__UpperCAmelCase ) + torch.log(__UpperCAmelCase )
return loss
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
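        # Optimise a random latent-space offset with Adam so the decoded image
        # moves toward the prompts under the CLIP loss, yielding each iteration.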
__lowerCamelCase = torch.randn_like(self.latent , requires_grad=__UpperCAmelCase , device=self.device )
__lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__lowerCamelCase = self._add_vector(__UpperCAmelCase )
__lowerCamelCase = loop_post_process(__UpperCAmelCase )
__lowerCamelCase = self._get_CLIP_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
print('''CLIP loss''' , __UpperCAmelCase )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=__UpperCAmelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
wandb.init(reinit=__UpperCAmelCase , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__lowerCamelCase = Image.open(__UpperCAmelCase )
__lowerCamelCase = image.resize((256, 256) )
            wandb.log({'''Original Image''': wandb.Image(__UpperCAmelCase )} )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
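        # Normalise prompts given as a '|'-separated string, (prompt, weight)
        # pairs, or 'prompt:weight' strings into parallel prompt/weight lists.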
if not prompts:
return []
__lowerCamelCase = []
__lowerCamelCase = []
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(__UpperCAmelCase , (tuple, list) ):
__lowerCamelCase = prompt[0]
__lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
__lowerCamelCase ,__lowerCamelCase = prompt.split(''':''' )
__lowerCamelCase = float(__UpperCAmelCase )
else:
__lowerCamelCase = prompt
__lowerCamelCase = 1.0
processed_prompts.append(__UpperCAmelCase )
weights.append(__UpperCAmelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__UpperCAmelCase , device=self.device ),
}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , ):
'''simple docstring'''
if image_path:
__lowerCamelCase = self._get_latent(__UpperCAmelCase )
else:
__lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
assert pos_prompts, "You must provide at least one positive prompt."
__lowerCamelCase = self.process_prompts(__UpperCAmelCase )
__lowerCamelCase = self.process_prompts(__UpperCAmelCase )
if save_final and save_path is None:
__lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
else:
__lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(__UpperCAmelCase )
__lowerCamelCase = save_path
__lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(__UpperCAmelCase ) )
__lowerCamelCase = loop_post_process(__UpperCAmelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) ):
if show_intermediate:
show_pil(__UpperCAmelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(__UpperCAmelCase )} )
if show_final:
show_pil(__UpperCAmelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
| 622 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 622 | 1 |
def a__ ( ):
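    # Project Euler 40: build Champernowne's constant by concatenating the
    # positive integers until it is one million digits long, then multiply the
    # digits at positions 1, 10, 100, ..., 1_000_000.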
__lowerCamelCase = []
__lowerCamelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCamelCase = ''''''.join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
| 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ = getLogger(__name__)
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
__lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
__lowerCamelCase = str(_UpperCamelCase )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCamelCase = model.half()
__lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
if prefix is None:
__lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
__lowerCamelCase = [prefix + text for text in examples_chunk]
__lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
__lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowerCamelCase = int(time.time() - start_time ) # seconds
__lowerCamelCase = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
parser.add_argument(
'''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
__lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowerCamelCase = generate_summaries_or_translations(
_UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**_UpperCamelCase ,)
if args.reference_path is None:
return {}
# Compute scores
__lowerCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
__lowerCamelCase = score_fn(_UpperCamelCase ,_UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
__lowerCamelCase = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase ,open(args.score_path ,'''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 622 | 1 |
import math
def a__ ( ):
__lowerCamelCase = input('''Enter message: ''' )
__lowerCamelCase = int(input(F"""Enter key [2-{len(_UpperCamelCase ) - 1}]: """ ) )
__lowerCamelCase = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__lowerCamelCase = encrypt_message(_UpperCamelCase ,_UpperCamelCase )
elif mode.lower().startswith('''d''' ):
__lowerCamelCase = decrypt_message(_UpperCamelCase ,_UpperCamelCase )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : str ):
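    # Columnar transposition encryption: column `col` of the ciphertext collects
    # every `key`-th character of the message, starting at offset `col`.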
__lowerCamelCase = [''''''] * key
for col in range(_UpperCamelCase ):
__lowerCamelCase = col
while pointer < len(_UpperCamelCase ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_UpperCamelCase )
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : str ):
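    # Decryption reverses the transposition: deal the ciphertext row by row into
    # num_cols buckets, leaving the trailing `num_shaded_boxes` grid cells
    # empty, then concatenate the buckets to recover the plaintext.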
__lowerCamelCase = math.ceil(len(_UpperCamelCase ) / key )
__lowerCamelCase = key
__lowerCamelCase = (num_cols * num_rows) - len(_UpperCamelCase )
__lowerCamelCase = [''''''] * num_cols
__lowerCamelCase = 0
__lowerCamelCase = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
__lowerCamelCase = 0
row += 1
return "".join(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 622 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : Any=None ):
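    # Default the attention mask to masking out the config's pad token.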
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(_UpperCamelCase ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = embed_dim
__lowerCamelCase = word_embed_proj_dim
__lowerCamelCase = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCAmelCase , **self.config_updates , )
__lowerCamelCase = prepare_opt_inputs_dict(__UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel(config=__UpperCAmelCase )
__lowerCamelCase = inputs_dict['''input_ids''']
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCamelCase = model_class(config=__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
__lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
__lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
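    # Helper: wrap a nested Python list of token ids as an integer tf.constant.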
return tf.constant(_UpperCamelCase ,dtype=tf.intaa )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCamelCase = input_ids.shape[0]
__lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowerCamelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCamelCase = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCamelCase = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
__lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-3 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = '''facebook/opt-350m'''
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCamelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-125m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = '''left'''
# use different length sentences to test batching
__lowerCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
__lowerCamelCase = inputs['''input_ids''']
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] )
__lowerCamelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase )
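        # Number of pad tokens in the last (shorter) row of the batch; the
        # unpadded run below shrinks max_length by this count so its generation
        # budget matches the padded batch.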
__lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowerCamelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 622 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = IFImgaImgSuperResolutionPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowerCamelCase ( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowerCamelCase ( self ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_save_load_local()
def lowerCamelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 622 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ = logging.getLogger(__name__)
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ):
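    # Accuracy helper: argmax over the class axis, then count agreements with labels.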
__lowerCamelCase = np.argmax(_UpperCamelCase ,axis=1 )
return np.sum(outputs == labels )
def a__ ( _UpperCamelCase : Optional[int] ):
with open(_UpperCamelCase ,encoding='''utf_8''' ) as f:
__lowerCamelCase = csv.reader(_UpperCamelCase )
__lowerCamelCase = []
next(_UpperCamelCase ) # skip the first line
for line in tqdm(_UpperCamelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Dict ):
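    # Pack each (story, continuation_1, continuation_2, label) example into the
    # [start] story [delimiter] continuation [classify] token layout expected by
    # the double-heads model, with LM labels and the multiple-choice label.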
__lowerCamelCase = []
for dataset in encoded_datasets:
__lowerCamelCase = len(_UpperCamelCase )
__lowerCamelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa )
__lowerCamelCase = np.zeros((n_batch, 2) ,dtype=np.intaa )
__lowerCamelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.intaa )
__lowerCamelCase = np.zeros((n_batch,) ,dtype=np.intaa )
        for i, (story, conta, conta, mc_label) in enumerate(_UpperCamelCase ):
__lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase = with_conta
__lowerCamelCase = with_conta
__lowerCamelCase = len(_UpperCamelCase ) - 1
__lowerCamelCase = len(_UpperCamelCase ) - 1
__lowerCamelCase = with_conta
__lowerCamelCase = with_conta
__lowerCamelCase = mc_label
__lowerCamelCase = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' ,type=_UpperCamelCase ,default='''openai-gpt''' ,help='''pretrained model name''' )
parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
parser.add_argument('''--train_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--eval_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 )
parser.add_argument('''--num_train_epochs''' ,type=_UpperCamelCase ,default=3 )
parser.add_argument('''--train_batch_size''' ,type=_UpperCamelCase ,default=8 )
parser.add_argument('''--eval_batch_size''' ,type=_UpperCamelCase ,default=16 )
parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=_UpperCamelCase ,help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' ,type=_UpperCamelCase ,default=1 )
parser.add_argument(
'''--max_steps''' ,default=-1 ,type=_UpperCamelCase ,help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) ,)
parser.add_argument(
        '''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''Number of update steps to accumulate before performing a backward/update pass.''' ,)
parser.add_argument('''--learning_rate''' ,type=_UpperCamelCase ,default=6.25e-5 )
parser.add_argument('''--warmup_steps''' ,default=0 ,type=_UpperCamelCase ,help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' ,type=_UpperCamelCase ,default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' ,type=_UpperCamelCase ,default=0.01 )
parser.add_argument('''--lm_coef''' ,type=_UpperCamelCase ,default=0.9 )
parser.add_argument('''--n_valid''' ,type=_UpperCamelCase ,default=3_74 )
    parser.add_argument('''--server_ip''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for remote debugging.''' )
    parser.add_argument('''--server_port''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for remote debugging.''' )
__lowerCamelCase = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
        # Remote debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowerCamelCase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase ,_UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowerCamelCase = ['''_start_''', '''_delimiter_''', '''_classify_''']
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCamelCase )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCamelCase ) )
model.to(_UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCamelCase : Dict ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) )
elif isinstance(_UpperCamelCase ,_UpperCamelCase ):
return obj
return [tokenize_and_encode(_UpperCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__lowerCamelCase = load_rocstories_dataset(args.train_dataset )
__lowerCamelCase = load_rocstories_dataset(args.eval_dataset )
__lowerCamelCase = (train_dataset, eval_dataset)
__lowerCamelCase = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
__lowerCamelCase = model.config.n_positions // 2 - 2
__lowerCamelCase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__lowerCamelCase = min(_UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowerCamelCase = pre_process_datasets(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,*_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = tensor_datasets[0], tensor_datasets[1]
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = RandomSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.train_batch_size )
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = SequentialSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowerCamelCase = args.max_steps
__lowerCamelCase = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
__lowerCamelCase = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowerCamelCase = list(model.named_parameters() )
__lowerCamelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__lowerCamelCase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__lowerCamelCase = AdamW(_UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon )
__lowerCamelCase = get_linear_schedule_with_warmup(
_UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=_UpperCamelCase )
if args.do_train:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ):
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = tqdm(_UpperCamelCase ,desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
__lowerCamelCase = model(_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowerCamelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__lowerCamelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase ,scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowerCamelCase = model.module if hasattr(_UpperCamelCase ,'''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
torch.save(model_to_save.state_dict() ,_UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
__lowerCamelCase ,__lowerCamelCase = 0, 0
__lowerCamelCase ,__lowerCamelCase = 0, 0
for batch in tqdm(_UpperCamelCase ,desc='''Evaluating''' ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
with torch.no_grad():
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model(
_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = mc_logits.detach().cpu().numpy()
__lowerCamelCase = mc_labels.to('''cpu''' ).numpy()
__lowerCamelCase = accuracy(_UpperCamelCase ,_UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowerCamelCase = eval_loss / nb_eval_steps
__lowerCamelCase = eval_accuracy / nb_eval_examples
__lowerCamelCase = tr_loss / nb_tr_steps if args.do_train else None
__lowerCamelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__lowerCamelCase = os.path.join(args.output_dir ,'''eval_results.txt''' )
with open(_UpperCamelCase ,'''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' ,_UpperCamelCase ,str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 622 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="resnet50" , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = out_indices if out_indices is not None else [4]
__lowerCamelCase = stage_names
__lowerCamelCase = out_features
__lowerCamelCase = backbone
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = use_pretrained_backbone
__lowerCamelCase = is_training
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = self.get_config()
return config, pixel_values
def lowerCamelCase ( self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
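        # Run a batch of pixel values through the timm backbone and check the
        # shape of the final feature map.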
__lowerCamelCase = TimmBackbone(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TimmBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TimmBackboneModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''resnet18'''
__lowerCamelCase = '''microsoft/resnet-18'''
__lowerCamelCase = AutoBackbone.from_pretrained(__UpperCAmelCase , use_timm_backbone=__UpperCAmelCase )
__lowerCamelCase = AutoBackbone.from_pretrained(__UpperCAmelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCamelCase = AutoBackbone.from_pretrained(__UpperCAmelCase , use_timm_backbone=__UpperCAmelCase , out_indices=[1, 2, 3] )
__lowerCamelCase = AutoBackbone.from_pretrained(__UpperCAmelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
__lowerCamelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCamelCase = self.all_model_classes[0]
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
__lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model(**__UpperCAmelCase )
__lowerCamelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCamelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(**__UpperCAmelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
__lowerCamelCase = None
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(**__UpperCAmelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
__lowerCamelCase = False
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(**__UpperCAmelCase )
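# Hedged sketch of what the timm/transformers parity test above exercises,
# outside the tester classes. The API names (AutoBackbone, use_timm_backbone,
# out_indices, channels, feature_maps) are taken from the tests themselves;
# running this needs the `timm` package and network access for the checkpoint.
import torch
from transformers import AutoBackbone

demo_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
demo_pixels = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    demo_out = demo_backbone(demo_pixels)
# One feature map per requested stage; channel counts are exposed on the model.
assert len(demo_out.feature_maps) == len(demo_backbone.out_indices) == len(demo_backbone.channels)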
| 622 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 622 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
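# The dispatcher above is the standard argparse sub-command pattern: each
# command registers a subparser and sets a `func` default used for dispatch.
# A minimal standalone sketch of that pattern (all names here are illustrative):
def _register_hello(subparsers):
    p = subparsers.add_parser("hello", help="print a greeting")
    p.add_argument("--name", default="world")
    p.set_defaults(func=lambda a: print(f"hello {a.name}"))

demo_parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
_register_hello(demo_parser.add_subparsers(help="demo-cli command helpers"))
demo_args = demo_parser.parse_args(["hello", "--name", "CLI"])
demo_args.func(demo_args)  # prints: hello CLI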
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """lxmert"""
lowerCAmelCase__ = {}
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=9500 , __UpperCAmelCase=1600 , __UpperCAmelCase=400 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=9 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=2048 , __UpperCAmelCase=4 , __UpperCAmelCase=6.67 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = num_qa_labels
__lowerCamelCase = num_object_labels
__lowerCamelCase = num_attr_labels
__lowerCamelCase = l_layers
__lowerCamelCase = x_layers
__lowerCamelCase = r_layers
__lowerCamelCase = visual_feat_dim
__lowerCamelCase = visual_pos_dim
__lowerCamelCase = visual_loss_normalizer
__lowerCamelCase = task_matched
__lowerCamelCase = task_mask_lm
__lowerCamelCase = task_obj_predict
__lowerCamelCase = task_qa
__lowerCamelCase = visual_obj_loss
__lowerCamelCase = visual_attr_loss
__lowerCamelCase = visual_feat_loss
__lowerCamelCase = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__UpperCAmelCase )
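# Hedged sketch: upstream this configuration class is LxmertConfig, and the
# dict built on the last line of __init__ records encoder depth per stream
# (language, cross-modality, vision). Illustrative only — run from user code
# with `transformers` installed, not inside this module.
from transformers import LxmertConfig

demo_config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(demo_config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}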
| 622 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=False , __UpperCAmelCase=2048 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
__lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
__lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
__lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
__lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , model_max_length=__UpperCAmelCase , **__UpperCAmelCase , )
# Creates a mapping for looking up the IDs of special symbols.
__lowerCamelCase = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__lowerCamelCase = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__lowerCamelCase = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__lowerCamelCase = UNICODE_VOCAB_SIZE
__lowerCamelCase = len(self._special_codepoints )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._unicode_vocab_size
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return list(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
try:
return ord(__UpperCAmelCase )
except TypeError:
raise ValueError(F"""invalid token: '{token}'""" )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__UpperCAmelCase )
except TypeError:
raise ValueError(F"""invalid id: {index}""" )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return "".join(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
__lowerCamelCase = [1] + ([0] * len(__UpperCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(__UpperCAmelCase )) + [1]
return result
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
return ()
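# The tokenizer above is character-level: a token is a single unicode
# character, its id is ord(char), and ids decode back via chr(id). A
# self-contained round trip using the special codepoints defined above:
demo_ids = [CLS] + [ord(c) for c in "hi"] + [SEP]
print(demo_ids)  # [57344, 104, 105, 57345]
print("".join(chr(i) for i in demo_ids[1:-1]))  # hi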
| 622 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a ragged batch of label sequences (or span tuples) to `sequence_length`.

    NOTE: the element-wise copy into `out_tensor` was reconstructed from
    context; the original per-row index expressions were garbled.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(truncated)] = truncated
        else:
            out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
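# Quick usage check of padding_tensor above on toy inputs (hedged example;
# the exact per-row copy was reconstructed, see the note in the function):
demo_spans = [[(0, 3), (4, 7)], [(0, 2)]]
print(padding_tensor(demo_spans, (-1, -1), "right", 3))
# [[[0, 3], [4, 7], [-1, -1]], [[0, 2], [-1, -1], [-1, -1]]]
print(padding_tensor([[1, 2, 3], [4]], -1, "right", 3))
# [[1, 2, 3], [4, -1, -1]]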
| 622 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = """▁"""
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = BertGenerationTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''<s>'''
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1002 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [18536, 2260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__lowerCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__lowerCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowerCamelCase = ''' '''.join(__UpperCAmelCase )
__lowerCamelCase = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
__lowerCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
__lowerCamelCase = BertGenerationConfig()
__lowerCamelCase = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
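# Hedged sketch of the round trip the slow tests above assert, using the same
# public checkpoint (requires network access and the sentencepiece package):
demo_tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
demo_ids = demo_tok.encode("Hello World!")
print(demo_ids)  # [18536, 2260, 101], matching the expected values above
print(demo_tok.convert_ids_to_tokens(demo_ids))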
| 622 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = 2
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowerCamelCase = n_head
# Used in the tests to check the size of the first hidden state
__lowerCamelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowerCamelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowerCamelCase = self.num_hidden_layers + 2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForPreTraining(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
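# Hedged sketch of what create_and_check_model above exercises, outside the
# tester: a tiny random-weight TF Funnel accepts dict or positional inputs,
# and the full model (which upsamples after pooling) keeps the input length.
import tensorflow as tf
from transformers import FunnelConfig, TFFunnelModel

demo_config = FunnelConfig(
    vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37
)
demo_model = TFFunnelModel(demo_config)
demo_ids = tf.random.uniform((2, 7), maxval=99, dtype=tf.int32)
out_a = demo_model({"input_ids": demo_ids})
out_b = demo_model(demo_ids)
assert out_a.last_hidden_state.shape == out_b.last_hidden_state.shape == (2, 7, 32)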
| 622 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 622 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
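# Hedged offline check of the xpath above against an inline snippet, so the
# extraction logic can be verified without a network request:
demo_page = html.fromstring(
    '<div class="maincounter-number"><span>1,000</span></div>'
    '<div class="maincounter-number"><span>50</span></div>'
    '<div class="maincounter-number"><span>900</span></div>'
)
print(covid_data(*demo_page.xpath('//div[@class = "maincounter-number"]/span/text()')))
# covid_data(cases='1,000', deaths='50', recovered='900')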
| 622 | 1 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
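# Example invocation of the converter above (script name and all paths are
# placeholders):
#   python convert_script.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model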
| 622 |
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
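# Quick usage check of split() above (illustrative inputs):
print(split("apple#banana#cherry", separator="#"))  # ['apple', 'banana', 'cherry']
print(split("hello world"))  # ['hello', 'world']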
| 622 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a_ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
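# The block above is the transformers lazy-import pattern: heavy submodules
# are only imported when an attribute is first accessed. A hedged mini-version
# of the same idea using only the standard library (names are illustrative):
import importlib

class LazyNamespace:
    def __init__(self, mapping):
        self._mapping = mapping  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._mapping[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so the import happens only once
        return value

ns = LazyNamespace({"dataclass": "dataclasses"})
print(ns.dataclass)  # the real import is triggered on this first access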
| 622 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
        self.assertEqual(
            len(__UpperCAmelCase ) , 6 )  # check that the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase )  # check that the doc-token-related keys are present in the dictionary
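# Illustrative sketch (not part of the test class above): the [[1], [0]] doc-id
# assertions follow from maximum inner product search. The toy embeddings below
# are assumptions chosen to reproduce that result, not the real dummy index.
import numpy as np

def _toy_mips(queries, doc_embeds, n_docs=1):
    # scores[i, j] = <query_i, doc_j>; sort descending and keep the top n_docs
    scores = queries @ doc_embeds.T
    return np.argsort(-scores, axis=1)[:, :n_docs]

_doc_embeds = np.array([[-1.0, -1.0], [1.0, 1.0]])  # doc "0" and doc "1"
_queries = np.array([[1.0, 1.0], [-1.0, -1.0]])     # all-ones query, then all-minus-ones
assert _toy_mips(_queries, _doc_embeds).tolist() == [[1], [0]]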
| 622 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a_ = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a_ = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a_ = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
a_ = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
a_ = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
a_ = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''' )
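# A minimal usage sketch for the parser above (the flag name here is hypothetical):
# passing it as `type=` lets argparse accept "yes"/"no"-style strings directly.
_demo_parser = argparse.ArgumentParser()
_demo_parser.add_argument("--use_ema", type=strabool, default=False)
assert _demo_parser.parse_args(["--use_ema", "yes"]).use_ema is True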
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
__lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
__lowerCamelCase = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 ,dim=0 )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 ,dim=0 )
__lowerCamelCase = checkpoint[F"""{old_prefix}.norm.weight"""]
__lowerCamelCase = checkpoint[F"""{old_prefix}.norm.bias"""]
__lowerCamelCase = weight_q.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_q.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = weight_k.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_k.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = weight_v.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = bias_v.squeeze(-1 ).squeeze(-1 )
__lowerCamelCase = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
__lowerCamelCase = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
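# Sketch of the qkv split performed above: the original checkpoint stores the fused
# attention projection as a single (3*C, C, 1, 1) conv weight, which chunks into
# separate q/k/v pieces that become (C, C) matrices after squeezing the 1x1 dims.
_c = 8  # hypothetical channel count, for illustration only
_qkv = torch.randn(3 * _c, _c, 1, 1)
_q, _k, _v = _qkv.chunk(3, dim=0)
assert _q.squeeze(-1).squeeze(-1).shape == (_c, _c)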
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    new_checkpoint = {}
__lowerCamelCase = checkpoint['''time_embed.0.weight''']
__lowerCamelCase = checkpoint['''time_embed.0.bias''']
__lowerCamelCase = checkpoint['''time_embed.2.weight''']
__lowerCamelCase = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
__lowerCamelCase = checkpoint['''label_emb.weight''']
__lowerCamelCase = checkpoint['''input_blocks.0.0.weight''']
__lowerCamelCase = checkpoint['''input_blocks.0.0.bias''']
__lowerCamelCase = unet_config['''down_block_types''']
__lowerCamelCase = unet_config['''layers_per_block''']
__lowerCamelCase = unet_config['''attention_head_dim''']
__lowerCamelCase = unet_config['''block_out_channels''']
__lowerCamelCase = 1
__lowerCamelCase = channels_list[0]
for i, layer_type in enumerate(_UpperCamelCase ):
__lowerCamelCase = channels_list[i]
__lowerCamelCase = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_UpperCamelCase ):
__lowerCamelCase = F"""down_blocks.{i}.resnets.{j}"""
__lowerCamelCase = F"""input_blocks.{current_layer}.0"""
__lowerCamelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,has_skip=_UpperCamelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_UpperCamelCase ):
__lowerCamelCase = F"""down_blocks.{i}.resnets.{j}"""
__lowerCamelCase = F"""input_blocks.{current_layer}.0"""
__lowerCamelCase = True if j == 0 and downsample_block_has_skip else False
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,has_skip=_UpperCamelCase )
__lowerCamelCase = F"""down_blocks.{i}.attentions.{j}"""
__lowerCamelCase = F"""input_blocks.{current_layer}.1"""
__lowerCamelCase = convert_attention(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
current_layer += 1
if i != len(_UpperCamelCase ) - 1:
__lowerCamelCase = F"""down_blocks.{i}.downsamplers.0"""
__lowerCamelCase = F"""input_blocks.{current_layer}.0"""
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
current_layer += 1
__lowerCamelCase = current_channels
# hardcoded the mid-block for now
__lowerCamelCase = '''mid_block.resnets.0'''
__lowerCamelCase = '''middle_block.0'''
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = '''mid_block.attentions.0'''
__lowerCamelCase = '''middle_block.1'''
__lowerCamelCase = convert_attention(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = '''mid_block.resnets.1'''
__lowerCamelCase = '''middle_block.2'''
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = 0
__lowerCamelCase = unet_config['''up_block_types''']
for i, layer_type in enumerate(_UpperCamelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCamelCase = F"""up_blocks.{i}.resnets.{j}"""
__lowerCamelCase = F"""output_blocks.{current_layer}.0"""
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,has_skip=_UpperCamelCase )
current_layer += 1
if i != len(_UpperCamelCase ) - 1:
__lowerCamelCase = F"""up_blocks.{i}.upsamplers.0"""
__lowerCamelCase = F"""output_blocks.{current_layer-1}.1"""
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__lowerCamelCase = F"""up_blocks.{i}.resnets.{j}"""
__lowerCamelCase = F"""output_blocks.{current_layer}.0"""
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,has_skip=_UpperCamelCase )
__lowerCamelCase = F"""up_blocks.{i}.attentions.{j}"""
__lowerCamelCase = F"""output_blocks.{current_layer}.1"""
__lowerCamelCase = convert_attention(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
current_layer += 1
if i != len(_UpperCamelCase ) - 1:
__lowerCamelCase = F"""up_blocks.{i}.upsamplers.0"""
__lowerCamelCase = F"""output_blocks.{current_layer-1}.2"""
__lowerCamelCase = convert_resnet(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = checkpoint['''out.0.weight''']
__lowerCamelCase = checkpoint['''out.0.bias''']
__lowerCamelCase = checkpoint['''out.2.weight''']
__lowerCamelCase = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
a_ = parser.parse_args()
a_ = strabool(args.class_cond)
a_ = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
a_ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a_ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a_ = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
a_ = None
a_ = con_pt_to_diffuser(args.unet_path, unet_config)
a_ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a_ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a_ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a_ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
a_ = CMStochasticIterativeScheduler(**scheduler_config)
a_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
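    # Once saved, the converted pipeline can be reloaded for sampling; a sketch
    # (consistency-distilled checkpoints typically need very few steps):
    #   pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    #   image = pipe(num_inference_steps=1).images[0]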
| 622 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 2E-3
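# Usage sketch: with the upstream class names (this obfuscated copy renames both
# classes, so the names below are assumptions about the original module) the two
# classes above would combine roughly as:
#   config = PoolFormerConfig()
#   onnx_config = PoolFormerOnnxConfig(config)
#   onnx_config.inputs               # pixel_values with dynamic batch/channel/h/w axes
#   onnx_config.atol_for_validation  # 2e-3, from the property above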
| 622 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
@add_end_docstrings(
lowerCAmelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.framework == "tf":
__lowerCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__lowerCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_masked_index(__UpperCAmelCase )
__lowerCamelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
if return_tensors is None:
__lowerCamelCase = self.framework
__lowerCamelCase = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.ensure_exactly_one_mask_token(__UpperCAmelCase )
return model_inputs
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.model(**__UpperCAmelCase )
__lowerCamelCase = model_inputs['''input_ids''']
return model_outputs
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=5 , __UpperCAmelCase=None ):
'''simple docstring'''
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__lowerCamelCase = target_ids.shape[0]
__lowerCamelCase = model_outputs['''input_ids'''][0]
__lowerCamelCase = model_outputs['''logits''']
if self.framework == "tf":
__lowerCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__lowerCamelCase = outputs.numpy()
__lowerCamelCase = outputs[0, masked_index, :]
__lowerCamelCase = stable_softmax(__UpperCAmelCase , axis=-1 )
if target_ids is not None:
__lowerCamelCase = tf.gather_nd(tf.squeeze(__UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__lowerCamelCase = tf.expand_dims(__UpperCAmelCase , 0 )
__lowerCamelCase = tf.math.top_k(__UpperCAmelCase , k=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = topk.values.numpy(), topk.indices.numpy()
else:
__lowerCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__lowerCamelCase = outputs[0, masked_index, :]
__lowerCamelCase = logits.softmax(dim=-1 )
if target_ids is not None:
__lowerCamelCase = probs[..., target_ids]
__lowerCamelCase ,__lowerCamelCase = probs.topk(__UpperCAmelCase )
__lowerCamelCase = []
__lowerCamelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__lowerCamelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__lowerCamelCase = input_ids.numpy().copy()
if target_ids is not None:
__lowerCamelCase = target_ids[p].tolist()
__lowerCamelCase = p
# Filter padding out:
__lowerCamelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # We usually skip special tokens to give readable output, but for
                # multi-mask inputs the other [MASK] tokens would then be removed,
                # making the output look odd, so we add them back.
__lowerCamelCase = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__UpperCAmelCase )
result.append(__UpperCAmelCase )
if single_mask:
return result[0]
return result
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = [targets]
try:
__lowerCamelCase = self.tokenizer.get_vocab()
except Exception:
__lowerCamelCase = {}
__lowerCamelCase = []
for target in targets:
__lowerCamelCase = vocab.get(__UpperCAmelCase , __UpperCAmelCase )
if id_ is None:
__lowerCamelCase = self.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , max_length=1 , truncation=__UpperCAmelCase , )['''input_ids''']
if len(__UpperCAmelCase ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__lowerCamelCase = input_ids[0]
                # XXX: If users hit this path, tokenization becomes pretty slow,
                # so emit a warning that lets them fix their input to get
                # faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
__lowerCamelCase = list(set(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__lowerCamelCase = np.array(__UpperCAmelCase )
return target_ids
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = {}
if targets is not None:
__lowerCamelCase = self.get_target_ids(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = target_ids
if top_k is not None:
__lowerCamelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
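# Usage sketch mirroring the standard transformers API (the checkpoint name is an
# example; this obfuscated copy registers the pipeline class under a mangled name):
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="distilroberta-base")
#   fill("Paris is the <mask> of France.", top_k=2)
#   # -> a list of {"score", "token", "token_str", "sequence"} dicts, as built above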
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """visual_bert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
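# Usage sketch: the config above is a standard BERT layout plus `visual_embedding_dim`.
# With the upstream names (this obfuscated copy renames the class) it would be used as:
#   from transformers import VisualBertConfig, VisualBertModel
#   config = VisualBertConfig(visual_embedding_dim=512)
#   model = VisualBertModel(config)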
| 622 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """BridgeTowerImageProcessor"""
lowerCAmelCase__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel_values + pixel_mask
__lowerCamelCase = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , **__UpperCAmelCase )
encoding.update(__UpperCAmelCase )
return encoding
def lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer.model_input_names
__lowerCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
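# Usage sketch (standard processor pattern; the checkpoint name is an example):
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=image, text="a photo", return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values (+ pixel_mask)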
| 622 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """spiece.model"""}
a_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a_ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a_ = """▁"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
__lowerCamelCase = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
__lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
__lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.preprocess_text(__UpperCAmelCase )
__lowerCamelCase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
__lowerCamelCase = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__lowerCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
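# Self-contained sketch of the special-token layout built by the methods above:
# [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair, with
# token_type_ids 0 for the first segment and 1 for the second (toy ids, not real vocab).
_cls, _sep = [2], [3]
_a, _b = [10, 11], [20]
assert _cls + _a + _sep + _b + _sep == [2, 10, 11, 3, 20, 3]
assert len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]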
| 622 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
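# `get_duration` comes from a local utils module that is not shown here. A plausible
# minimal implementation (an assumption, not the actual helper) would time the call
# and return the elapsed seconds, which is what `benchmark_iterating` records:
#   import functools, time
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start
#       return wrapper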
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        __lowerCamelCase = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        __lowerCamelCase = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            __lowerCamelCase = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            __lowerCamelCase = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
    functions_shuffled = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_00}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10_00}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir ,'''dataset.arrow''' ) ,features ,num_examples=SPEED_TEST_N_EXAMPLES ,seq_shapes={'''list''': (1_00,)} ,)
        print('''first set of iterations''' )
        for func, kwargs in functions:
            print(func.__name__ ,str(kwargs ) )
            times[func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(dataset ,**kwargs )
        print('''shuffling dataset''' )
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
        for func, kwargs in functions_shuffled:
            print('''shuffled ''' ,func.__name__ ,str(kwargs ) )
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v ) for v in kwargs.values() )] = func(
                dataset ,**kwargs )
    with open(RESULTS_FILE_PATH ,'''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 622 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
set_seed(42 )
__lowerCamelCase = RegressionModel()
__lowerCamelCase = deepcopy(_UpperCamelCase )
__lowerCamelCase = RegressionDataset(length=_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,batch_size=_UpperCamelCase )
model.to(accelerator.device )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
__lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ,split='''validation''' )
    def tokenize_function(examples):
        outputs = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=True ,max_length=None )
        return outputs
with accelerator.main_process_first():
__lowerCamelCase = dataset.map(
_UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
__lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' )
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples ,padding='''longest''' ,return_tensors='''pt''' )
        return tokenizer.pad(examples ,padding='''max_length''' ,max_length=1_28 ,return_tensors='''pt''' )
return DataLoader(_UpperCamelCase ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=16 )
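# Illustration of the two padding strategies the collate_fn above switches between
# (toy numbers, not part of the test): "longest" pads each batch to its own longest
# sequence, while "max_length" always pads to the fixed 128.
#   batch token lengths [5, 9] -> padded to 9 with "longest", to 128 with "max_length"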
def get_mrpc_setup(dispatch_batches, split_batches):
__lowerCamelCase = Accelerator(dispatch_batches=_UpperCamelCase ,split_batches=_UpperCamelCase )
__lowerCamelCase = get_dataloader(_UpperCamelCase ,not dispatch_batches )
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' ,return_dict=_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
__lowerCamelCase = []
for batch in dataloader:
__lowerCamelCase ,__lowerCamelCase = batch.values()
with torch.no_grad():
__lowerCamelCase = model(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__lowerCamelCase ,__lowerCamelCase = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCamelCase )
targs.append(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = torch.cat(_UpperCamelCase ), torch.cat(_UpperCamelCase )
return logits, targs
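# Sketch of what gather_for_metrics guards against (an illustration, not the
# accelerate implementation): with 2 processes and 5 samples, the sampler pads to 6,
# so the gathered tensor contains one duplicated sample that must be dropped before
# computing metrics.
#   per_process = [[0, 1, 2], [3, 4, 0]]   # sample 0 repeated as padding
#   gathered    = [0, 1, 2, 3, 4, 0]
#   kept        = gathered[:5]             # what gather_for_metrics returns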
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = get_basic_setup(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = generate_predictions(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
assert (
len(_UpperCamelCase ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCamelCase )}"""
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
__lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' )
__lowerCamelCase ,__lowerCamelCase = get_mrpc_setup(_UpperCamelCase ,_UpperCamelCase )
# First do baseline
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''no''']
model.to(_UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCamelCase )
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCamelCase ,references=batch['''labels'''] )
__lowerCamelCase = metric.compute()
# Then do distributed
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase = batch['''labels''']
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCamelCase ,references=_UpperCamelCase )
__lowerCamelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_UpperCamelCase ,_UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__lowerCamelCase = Accelerator()
test_torch_metrics(_UpperCamelCase ,5_12 )
accelerator.state._reset_state()
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 622 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowerCAmelCase ( unittest.TestCase , lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = load_tool('''text-classification''' )
self.tool.setup()
__lowerCamelCase = load_tool('''text-classification''' , remote=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
| 622 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCamelCase = CLIPTextModel(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = CLIPTextModelWithProjection(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__lowerCamelCase = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = sd_pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# forward without prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = negative_prompt
__lowerCamelCase = 3 * [inputs['''prompt''']]
__lowerCamelCase = sd_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
__lowerCamelCase = sd_pipe(
**__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
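# Self-contained sketch of the slice-check pattern used throughout these tests:
# compare a 3x3 corner of the output image against stored values with an absolute
# tolerance (the numbers below are toy values, not real expectations).
_img = np.zeros((1, 32, 32, 3))
_img[0, -3:, -3:, -1] = 0.5
_expected = np.full(9, 0.5)
assert np.abs(_img[0, -3:, -3:, -1].flatten() - _expected).max() < 1E-2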
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_inputs(__UpperCAmelCase )
__lowerCamelCase = pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 622 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def a__ ( _UpperCamelCase : Optional[int] ):
__lowerCamelCase = SwinvaConfig()
__lowerCamelCase = swinva_name.split('''_''' )
__lowerCamelCase = name_split[1]
if "to" in name_split[3]:
__lowerCamelCase = int(name_split[3][-3:] )
else:
__lowerCamelCase = int(name_split[3] )
if "to" in name_split[2]:
__lowerCamelCase = int(name_split[2][-2:] )
else:
__lowerCamelCase = int(name_split[2][6:] )
if model_size == "tiny":
__lowerCamelCase = 96
__lowerCamelCase = (2, 2, 6, 2)
__lowerCamelCase = (3, 6, 12, 24)
elif model_size == "small":
__lowerCamelCase = 96
__lowerCamelCase = (2, 2, 18, 2)
__lowerCamelCase = (3, 6, 12, 24)
elif model_size == "base":
__lowerCamelCase = 1_28
__lowerCamelCase = (2, 2, 18, 2)
__lowerCamelCase = (4, 8, 16, 32)
else:
__lowerCamelCase = 1_92
__lowerCamelCase = (2, 2, 18, 2)
__lowerCamelCase = (6, 12, 24, 48)
if "to" in swinva_name:
__lowerCamelCase = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowerCamelCase = 2_18_41
__lowerCamelCase = '''huggingface/label-files'''
__lowerCamelCase = '''imagenet-22k-id2label.json'''
__lowerCamelCase = json.load(open(hf_hub_download(_UpperCamelCase ,_UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
__lowerCamelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
else:
__lowerCamelCase = 10_00
__lowerCamelCase = '''huggingface/label-files'''
__lowerCamelCase = '''imagenet-1k-id2label.json'''
__lowerCamelCase = json.load(open(hf_hub_download(_UpperCamelCase ,_UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
__lowerCamelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = img_size
__lowerCamelCase = num_classes
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
return config
def a__ ( _UpperCamelCase : Tuple ):
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
__lowerCamelCase = '''encoder.''' + name
if "attn.proj" in name:
__lowerCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
__lowerCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
__lowerCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
__lowerCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "q_bias" in name:
__lowerCamelCase = name.replace('''q_bias''' ,'''query.bias''' )
if "k_bias" in name:
__lowerCamelCase = name.replace('''k_bias''' ,'''key.bias''' )
if "v_bias" in name:
__lowerCamelCase = name.replace('''v_bias''' ,'''value.bias''' )
if "cpb_mlp" in name:
__lowerCamelCase = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' )
if name == "norm.weight":
__lowerCamelCase = '''layernorm.weight'''
if name == "norm.bias":
__lowerCamelCase = '''layernorm.bias'''
if "head" in name:
__lowerCamelCase = name.replace('''head''' ,'''classifier''' )
else:
__lowerCamelCase = '''swinv2.''' + name
return name
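# Worked example of the remapping above (traced by hand through the branches; the key is
# illustrative, not taken from a real checkpoint):
#   'layers.0.blocks.0.attn.proj.weight'
#     -> 'encoder.layers.0.blocks.0.attention.output.dense.weight'   ('layers' and 'attn.proj' rules)
#     -> 'swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight'   (non-head prefix)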
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ):
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(_UpperCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
__lowerCamelCase = key.split('''.''' )
__lowerCamelCase = int(key_split[1] )
__lowerCamelCase = int(key_split[3] )
__lowerCamelCase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[
dim : dim * 2
]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = val
return orig_state_dict
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ):
__lowerCamelCase = timm.create_model(_UpperCamelCase ,pretrained=_UpperCamelCase )
timm_model.eval()
__lowerCamelCase = get_swinva_config(_UpperCamelCase )
__lowerCamelCase = SwinvaForImageClassification(_UpperCamelCase )
model.eval()
__lowerCamelCase = convert_state_dict(timm_model.state_dict() ,_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
__lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' ,'''-''' ) ) )
__lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw )
__lowerCamelCase = image_processor(images=_UpperCamelCase ,return_tensors='''pt''' )
__lowerCamelCase = timm_model(inputs['''pixel_values'''] )
__lowerCamelCase = model(**_UpperCamelCase ).logits
assert torch.allclose(_UpperCamelCase ,_UpperCamelCase ,atol=1e-3 )
print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
model.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase ,_UpperCamelCase ) ,organization='''nandwalritik''' ,commit_message='''Add model''' ,)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
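# Usage sketch (assumed invocation and filename; both flags are defined by the argparse
# block above):
#   python convert_swinv2_original_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256
# The timm checkpoint is remapped, verified against the timm logits, then saved.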
| 622 |
import torch
from diffusers import StableDiffusionPipeline
a_ = """path-to-your-trained-model"""
a_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
a_ = """A photo of sks dog in a bucket"""
a_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
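# Reproducibility sketch (hedged: `generator` is the standard diffusers kwarg for seeding
# a pipeline call; variable names here are illustrative):
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]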
| 622 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = 2
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowerCamelCase = n_head
# Used in the tests to check the size of the first hidden state
__lowerCamelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowerCamelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowerCamelCase = self.num_hidden_layers + 2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForPreTraining(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
| 622 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __lowerCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
pass
def a__ ( _UpperCamelCase : List[str] ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
__lowerCamelCase = '''What is the placebo?'''
__lowerCamelCase = [
{
'''image''': load_image(__UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = dqa_pipeline(__UpperCAmelCase , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''How many cats are there?'''
__lowerCamelCase = [
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
# Tesseract detects no text in this image, so layoutlmv2 should fail and
# return an empty answer.
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
# We can optionally pass the words and bounding boxes directly
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
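# Minimal usage sketch distilled from the tests above (the model name and revision are the
# ones pinned in this file, not new assumptions):
#   dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa", revision="3dc6de3")
#   dqa_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2)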
| 622 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ):
__lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' )
def tokenize_function(_UpperCamelCase : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCamelCase = datasets.map(
_UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
# We also rename the 'label' column to 'labels', which is the name the transformers models expect.
__lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(_UpperCamelCase : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCamelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCamelCase = 8
else:
__lowerCamelCase = None
return tokenizer.pad(
_UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,)
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase )
__lowerCamelCase = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
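# Design note (hedged rationale for the branches in collate_fn above): padding to a
# multiple of 8 (fp16/bf16) or 16 (fp8) keeps sequence lengths aligned with tensor-core
# tile sizes, while the fixed length of 128 on TPU avoids an XLA recompilation for every
# new input shape.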
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : Tuple ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1":
__lowerCamelCase = 2
# New Code #
__lowerCamelCase = int(args.gradient_accumulation_steps )
# Initialize accelerator
__lowerCamelCase = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=_UpperCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config['''lr''']
__lowerCamelCase = int(config['''num_epochs'''] )
__lowerCamelCase = int(config['''seed'''] )
__lowerCamelCase = int(config['''batch_size'''] )
__lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' )
set_seed(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase )
# Instantiate scheduler
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# Gradient accumulation is currently unsupported on TPUs, and we advise against it: bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCamelCase ):
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = output.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
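# Hedged summary of what `accumulate` does (Accelerate internals, not code from this file):
# `accelerator.backward` scales the loss by 1/gradient_accumulation_steps, gradient
# synchronization is deferred to the accumulation boundary, and the wrapped optimizer turns
# `step()`/`zero_grad()` into no-ops on intermediate batches, so the loop body above can
# stay identical to the unaccumulated case.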
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCamelCase ,references=_UpperCamelCase ,)
__lowerCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase )
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' ,)
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''The number of minibatches to run before the accumulated gradients are applied.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCamelCase ,_UpperCamelCase )
if __name__ == "__main__":
main()
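# Launch sketch (standard Accelerate CLI; the script filename is an assumption):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16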
| 622 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''[PAD]'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__UpperCAmelCase ) , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [35389, 6672, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
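# Note on the offset arithmetic above (hedged summary): raw SentencePiece ids are shifted
# by a fixed `fairseq_offset` so the low ids can be reserved for fairseq-style special
# tokens ([PAD], [CLS], ...), which is why every expected id in the piece-level tests is
# written as `value + tokenizer.fairseq_offset`.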
| 622 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=("DownEncoderBlock2D",) , __UpperCAmelCase=(64,) , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase="silu" , __UpperCAmelCase=True , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = layers_per_block
__lowerCamelCase = torch.nn.Convad(
__UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase = None
__lowerCamelCase = nn.ModuleList([] )
# down
__lowerCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(__UpperCAmelCase ):
__lowerCamelCase = output_channel
__lowerCamelCase = block_out_channels[i]
__lowerCamelCase = i == len(__UpperCAmelCase ) - 1
__lowerCamelCase = get_down_block(
__UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
self.down_blocks.append(__UpperCAmelCase )
# mid
__lowerCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# out
__lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1E-6 )
__lowerCamelCase = nn.SiLU()
__lowerCamelCase = 2 * out_channels if double_z else out_channels
__lowerCamelCase = nn.Convad(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 )
__lowerCamelCase = False
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = x
__lowerCamelCase = self.conv_in(__UpperCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase ):
def custom_forward(*__UpperCAmelCase ):
return module(*__UpperCAmelCase )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
for down_block in self.down_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase )
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase = down_block(__UpperCAmelCase )
# middle
__lowerCamelCase = self.mid_block(__UpperCAmelCase )
# post-process
__lowerCamelCase = self.conv_norm_out(__UpperCAmelCase )
__lowerCamelCase = self.conv_act(__UpperCAmelCase )
__lowerCamelCase = self.conv_out(__UpperCAmelCase )
return sample
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=("UpDecoderBlock2D",) , __UpperCAmelCase=(64,) , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase="silu" , __UpperCAmelCase="group" , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = layers_per_block
__lowerCamelCase = nn.Convad(
__UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__lowerCamelCase = None
__lowerCamelCase = nn.ModuleList([] )
__lowerCamelCase = in_channels if norm_type == '''spatial''' else None
# mid
__lowerCamelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# up
__lowerCamelCase = list(reversed(__UpperCAmelCase ) )
__lowerCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__UpperCAmelCase ):
__lowerCamelCase = output_channel
__lowerCamelCase = reversed_block_out_channels[i]
__lowerCamelCase = i == len(__UpperCAmelCase ) - 1
__lowerCamelCase = get_up_block(
__UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , )
self.up_blocks.append(__UpperCAmelCase )
__lowerCamelCase = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase = SpatialNorm(block_out_channels[0] , __UpperCAmelCase )
else:
__lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1E-6 )
__lowerCamelCase = nn.SiLU()
__lowerCamelCase = nn.Convad(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 )
__lowerCamelCase = False
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = z
__lowerCamelCase = self.conv_in(__UpperCAmelCase )
__lowerCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase ):
def custom_forward(*__UpperCAmelCase ):
return module(*__UpperCAmelCase )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
__lowerCamelCase = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
# middle
__lowerCamelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
__lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
else:
# middle
__lowerCamelCase = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
__lowerCamelCase = up_block(__UpperCAmelCase , __UpperCAmelCase )
# post-process
if latent_embeds is None:
__lowerCamelCase = self.conv_norm_out(__UpperCAmelCase )
else:
__lowerCamelCase = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = self.conv_act(__UpperCAmelCase )
__lowerCamelCase = self.conv_out(__UpperCAmelCase )
return sample
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="random" , __UpperCAmelCase=False , __UpperCAmelCase=True ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = n_e
__lowerCamelCase = vq_embed_dim
__lowerCamelCase = beta
__lowerCamelCase = legacy
__lowerCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase = self.used.shape[0]
__lowerCamelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase = self.re_embed
__lowerCamelCase = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
__lowerCamelCase = n_e
__lowerCamelCase = sane_index_shape
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = inds.shape
assert len(__UpperCAmelCase ) > 1
__lowerCamelCase = inds.reshape(ishape[0] , -1 )
__lowerCamelCase = self.used.to(__UpperCAmelCase )
__lowerCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase = match.argmax(-1 )
__lowerCamelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase = self.unknown_index
return new.reshape(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = inds.shape
assert len(__UpperCAmelCase ) > 1
__lowerCamelCase = inds.reshape(ishape[0] , -1 )
__lowerCamelCase = self.used.to(__UpperCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase = 0 # simply set to zero
__lowerCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCAmelCase )
return back.reshape(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
__lowerCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
__lowerCamelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embedding vectors e_j: (z - e)^2 = z^2 + e^2 - 2*e*z, computed here directly with torch.cdist
__lowerCamelCase = torch.argmin(torch.cdist(__UpperCAmelCase , self.embedding.weight ) , dim=1 )
__lowerCamelCase = self.embedding(__UpperCAmelCase ).view(z.shape )
__lowerCamelCase = None
__lowerCamelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCamelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__lowerCamelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__lowerCamelCase = self.remap_to_used(__UpperCAmelCase )
__lowerCamelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__lowerCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__lowerCamelCase = indices.reshape(shape[0] , -1 ) # add batch axis
__lowerCamelCase = self.unmap_to_all(__UpperCAmelCase )
__lowerCamelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCamelCase = self.embedding(__UpperCAmelCase )
if shape is not None:
__lowerCamelCase = z_q.view(__UpperCAmelCase )
# reshape back to match original input shape
__lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
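# Illustrative usage only (identifiers in this dump are placeholder-renamed, so the names
# below are assumptions): given a quantizer with codebook size n_e and embedding width
# vq_embed_dim, the forward pass maps a (B, C, H, W) latent to its nearest codebook entries:
#   z_q, loss, (_, _, indices) = quantizer(torch.randn(1, vq_embed_dim, 8, 8))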
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__lowerCamelCase = parameters
__lowerCamelCase ,__lowerCamelCase = torch.chunk(__UpperCAmelCase , 2 , dim=1 )
__lowerCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
__lowerCamelCase = deterministic
__lowerCamelCase = torch.exp(0.5 * self.logvar )
__lowerCamelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCamelCase = __lowerCamelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase ( self , __UpperCAmelCase = None ):
'''simple docstring'''
        # make sure the sample is on the same device as the parameters and has the same dtype
__lowerCamelCase = randn_tensor(
self.mean.shape , generator=__UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
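        # reparameterization: x = mean + std * eps with eps ~ N(0, I), keeping sampling differentiable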
__lowerCamelCase = self.mean + self.std * sample
return x
def lowerCamelCase ( self , __UpperCAmelCase=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
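                # closed-form KL(q || N(0, I)) for a diagonal Gaussian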
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCamelCase = np.log(2.0 * np.pi )
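        # per-element Gaussian NLL: 0.5 * (log(2*pi) + logvar + (sample - mean)^2 / var), summed over the given dims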
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
return self.mean
| 622 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo
# with the command: python utils/check_config_docstrings.py
a_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def a__ ( _UpperCamelCase : Union[str, Any] ):
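    # returns the first checkpoint name whose docstring link points at its own Hub page
    # (https://huggingface.co/<name>), else None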
__lowerCamelCase = None
# source code of `config_class`
__lowerCamelCase = inspect.getsource(_UpperCamelCase )
__lowerCamelCase = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowerCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase = ckpt_name
break
return checkpoint
def a__ ( ):
__lowerCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowerCamelCase = get_checkpoint_from_config_class(_UpperCamelCase )
__lowerCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 622 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''[PAD]'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__UpperCAmelCase ) , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
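        # "9" and "é" are not in the fixture vocab, so their ids hit the unk entry and decode back as "[UNK]" below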
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [35389, 6672, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 622 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
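# Heavy submodules are imported lazily on first attribute access via `_LazyModule` at the
# bottom of the file; the try/except blocks below only register torch-backed symbols when
# torch is actually installed.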
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=6 , __UpperCAmelCase=17 , __UpperCAmelCase=23 , __UpperCAmelCase=11 , __UpperCAmelCase=True , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = act_dim
__lowerCamelCase = state_dim
__lowerCamelCase = hidden_size
__lowerCamelCase = max_length
__lowerCamelCase = is_training
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__lowerCamelCase = random_attention_mask((self.batch_size, self.seq_length) )
__lowerCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase ( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = DecisionTransformerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs # (config, states, actions, rewards, returns_to_go, timesteps, attention_mask)
__lowerCamelCase = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
lowerCAmelCase__ = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DecisionTransformerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = DecisionTransformerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(__UpperCAmelCase )] , __UpperCAmelCase )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 2 # number of steps of autoregressive prediction we will perform
__lowerCamelCase = 10 # defined by the RL environment, may be normalized
__lowerCamelCase = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
__lowerCamelCase = model.to(__UpperCAmelCase )
__lowerCamelCase = model.config
torch.manual_seed(0 )
__lowerCamelCase = torch.randn(1 , 1 , config.state_dim ).to(device=__UpperCAmelCase , dtype=torch.floataa ) # env.reset()
__lowerCamelCase = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=__UpperCAmelCase )
__lowerCamelCase = torch.tensor(__UpperCAmelCase , device=__UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__lowerCamelCase = state
__lowerCamelCase = torch.zeros(1 , 0 , config.act_dim , device=__UpperCAmelCase , dtype=torch.floataa )
__lowerCamelCase = torch.zeros(1 , 0 , device=__UpperCAmelCase , dtype=torch.floataa )
__lowerCamelCase = torch.tensor(0 , device=__UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(__UpperCAmelCase ):
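            # extend the action/reward buffers with zero placeholders for the step about to be predicted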
__lowerCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__UpperCAmelCase )] , dim=1 )
__lowerCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=__UpperCAmelCase )] , dim=1 )
__lowerCamelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model(
states=__UpperCAmelCase , actions=__UpperCAmelCase , rewards=__UpperCAmelCase , returns_to_go=__UpperCAmelCase , timesteps=__UpperCAmelCase , attention_mask=__UpperCAmelCase , return_dict=__UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
__lowerCamelCase = action_pred[0, -1]
__lowerCamelCase = torch.cat([states, state] , dim=1 )
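            # decrement the return-to-go target by the reward just collected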
__lowerCamelCase = returns_to_go[0, -1] - reward
__lowerCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__lowerCamelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=__UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
| 622 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
__lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
| 622 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
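        # round-trip the pipeline through save_pretrained/from_pretrained and check the reloaded copy reproduces the images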
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 622 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
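# From a caller's perspective the lazy indirection is invisible; a minimal sketch (the
# checkpoint id below is an assumption, not taken from this file):
#   from transformers import LukeModel
#   model = LukeModel.from_pretrained("studio-ousia/luke-base")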
| 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ = getLogger(__name__)
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
__lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
__lowerCamelCase = str(_UpperCamelCase )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCamelCase = model.half()
__lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
if prefix is None:
__lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
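    # generate in fixed-size batches (`chunks` yields successive slices of `batch_size` examples) to bound memory use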
for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
__lowerCamelCase = [prefix + text for text in examples_chunk]
__lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
__lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowerCamelCase = int(time.time() - start_time ) # seconds
__lowerCamelCase = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
parser.add_argument(
'''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
__lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowerCamelCase = generate_summaries_or_translations(
_UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**_UpperCamelCase ,)
if args.reference_path is None:
return {}
# Compute scores
__lowerCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
__lowerCamelCase = score_fn(_UpperCamelCase ,_UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
__lowerCamelCase = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase ,open(args.score_path ,'''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 622 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = 3
__lowerCamelCase = (32, 32)
__lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet_upscale
__lowerCamelCase = DDPMScheduler()
__lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''A painting of a squirrel eating a burger'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__lowerCamelCase = output.images
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCAmelCase , )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
__lowerCamelCase = low_res_image.size[0] * 4
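        # the x4 upscaler quadruples each spatial dimension, so the 64x64 input yields a 256x256 output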
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__lowerCamelCase = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.dummy_cond_unet_upscale
__lowerCamelCase = DDPMScheduler()
__lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''A painting of a squirrel eating a burger'''
__lowerCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__lowerCamelCase = output.images
assert image.shape[0] == 2
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__lowerCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.dummy_cond_unet_upscale
__lowerCamelCase = DDPMScheduler()
__lowerCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
__lowerCamelCase = self.dummy_vae
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__lowerCamelCase = unet.half()
__lowerCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
__lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=350 , )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''A painting of a squirrel eating a burger'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , ).images
__lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
__lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase = '''a cat sitting on a park bench'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
__lowerCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
__lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase = '''a cat sitting on a park bench'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''np''' , )
__lowerCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__lowerCamelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
__lowerCamelCase = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = '''a cat sitting on a park bench'''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type='''np''' , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 622 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : Any=None ):
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(_UpperCamelCase ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = embed_dim
__lowerCamelCase = word_embed_proj_dim
__lowerCamelCase = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCAmelCase , **self.config_updates , )
__lowerCamelCase = prepare_opt_inputs_dict(__UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel(config=__UpperCAmelCase )
__lowerCamelCase = inputs_dict['''input_ids''']
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
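        # run one forward pass over the full extended sequence and one incremental pass that
        # feeds only the new tokens plus the cache; matching slices are compared below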
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCamelCase = model_class(config=__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
__lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
__lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
return tf.constant(_UpperCamelCase ,dtype=tf.intaa )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCamelCase = input_ids.shape[0]
__lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowerCamelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCamelCase = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCamelCase = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
__lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-3 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 622 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
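# Illustrative note (not part of the original module): with the _LazyModule pattern above,
# heavy submodules are imported only on first attribute access, so e.g.
#
#     from transformers import TrOCRProcessor   # resolved lazily through _import_structure
#
# stays cheap until TrOCRProcessor is actually touched, at which point
# processing_trocr is imported behind the scenes.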
| 622 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    Each example is encoded twice, once per candidate continuation, so the model input has
    shape (n_batch, 2, input_len).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
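# Worked example of the encoding above (token ids invented for illustration): with
# story=[5, 6], cont1=[7], cont2=[8], start/delimiter/clf tokens 1/2/3 and input_len=6,
# row i of input_ids becomes [[1, 5, 6, 2, 7, 3], [1, 5, 6, 2, 8, 3]] and
# mc_token_ids[i] = [5, 5], i.e. the index of each clf token, which is where the
# multiple-choice head reads its hidden state.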
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
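# Example invocation, assuming this script is saved as run_openai_gpt.py and that the
# ROCStories cloze-test CSVs have been downloaded (paths are hypothetical):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset "$ROCSTORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROCSTORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ./rocstories_model \
#     --train_batch_size 16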
| 622 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
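# For reference, the ProteinNet text record parsed above looks roughly like this
# (values abbreviated; coordinates are given in picometers, one line per axis):
#
#   [PRIMARY]
#   MKTAYIAKQR...
#   [TERTIARY]
#   x1 x2 x3 ...
#   y1 y2 y3 ...
#   z1 z2 z3 ...
#   [MASK]
#   ++++------++++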
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
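# Minimal usage sketch (function names as reconstructed above; `features` and `result`
# are the model's input feature dict and output dict, respectively):
#
#   prot = from_prediction(features, result)
#   with open("prediction.pdb", "w") as f:
#       f.write(to_pdb(prot))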
| 622 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
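# Buffering arithmetic for the dataset above: with the defaults seq_length=1024,
# chars_per_token=3.6 and num_of_sequences=1024, each refill gathers
# 1024 * 3.6 * 1024 = ~3.77M characters, i.e. roughly enough raw text for ~1024
# packed sequences of 1024 tokens once tokenized and concatenated with the
# BOS separator token.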
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
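# Note on the metric above: perplexity is the exponential of the mean token-level
# cross-entropy, PPL = exp((1/N) * sum_i -log p(x_i | x_<i)), so lower is better,
# and it overflows to inf when the mean loss is very large.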
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 622 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
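# Illustrative usage (class name as reconstructed above; l/x/r_layers are the sizes of the
# language, cross-modality and vision encoder stacks):
#
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}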
| 622 | 1 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(""".""")
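# Example of the markdown this emits for a tree containing maths/addition.py and
# maths/series/p_series.py (hypothetical files):
#
#   ## Maths
#     * [Addition](maths/addition.py)
#     * Series
#       * [P Series](maths/series/p_series.py)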
| 622 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        length = min(len(tensor), sequence_length)
        if padding_side == "right":
            out_tensor[i, :length] = tensor[:length]
        else:
            out_tensor[i, -length:] = tensor[:length]
    return out_tensor.tolist()
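# Example of the padding behaviour above:
#   padding_tensor([[1, 2], [3]], -1, "right", 3)  -> [[1, 2, -1], [3, -1, -1]]
#   padding_tensor([[1, 2], [3]], -1, "left", 3)   -> [[-1, 1, 2], [-1, -1, 3]]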
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 622 | 1 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 622 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
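        # Worked example of the counting above: with block_sizes=[1, 1, 2] and
        # num_decoder_layers=1 (the defaults here), a non-base model has
        # num_hidden_layers = 4 + 1 = 5 and expected_num_hidden_layers = 7.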
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForPreTraining(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
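# tile each input to shape (batch_size, num_choices, seq_length) so the multiple-choice head sees one row per answer candidate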
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
| 622 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
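# Helper: when no attention mask is supplied, attend to every non-pad token.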
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = embed_dim
__lowerCamelCase = word_embed_proj_dim
__lowerCamelCase = False
def lowerCamelCase ( self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
config = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
inputs_dict = prepare_opt_inputs_dict(config , input_ids )
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel(config=__UpperCAmelCase )
__lowerCamelCase = inputs_dict['''input_ids''']
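# restrict to a single example so the cached and uncached forward passes can be compared cheaply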
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model , embedding_layer ):
if hasattr(embedding_layer , '''weight''' ):
return embedding_layer.weight
else:
# Build the word embedding weights if they do not exist yet,
# then retry the attribute lookup once built.
model.build()
if hasattr(embedding_layer , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCamelCase = model_class(config=__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
__lowerCamelCase = True
for pa, pb in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
__lowerCamelCase = True
for pa, pb in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
def _long_tensor(tok_lst ):
return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCamelCase = input_ids.shape[0]
__lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowerCamelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCamelCase = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCamelCase = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
__lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-3 ) )
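# the XLA-compiled forward pass should reproduce the eager output, within a looser tolerance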
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = '''facebook/opt-350m'''
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCamelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-125m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
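# decoder-only models must be padded on the left so generation continues straight from the prompt tokens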
__lowerCamelCase = '''left'''
# use different length sentences to test batching
__lowerCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
__lowerCamelCase = inputs['''input_ids''']
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] )
__lowerCamelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase )
__lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowerCamelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 622 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats ( _UpperCamelCase : str = "https://www.worldometers.info/coronavirus/" ):
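# the scraped page is expected to render three "maincounter-number" blocks, in order: cases, deaths, recovered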
xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(_UpperCamelCase ).content ).xpath(xpath_str ) )
a_ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 622 | 1 |
from sklearn.metrics import recall_score
import datasets
a_ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
a_ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
a_ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase="binary" , __UpperCAmelCase=None , __UpperCAmelCase="warn" , ):
'''simple docstring'''
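# delegates to sklearn; with average=None, recall_score returns a per-class array rather than a scalar, hence the size check on return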
__lowerCamelCase = recall_score(
__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase , zero_division=__UpperCAmelCase , )
return {"recall": float(__UpperCAmelCase ) if score.size == 1 else score}
| 622 |
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str = " " ):
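'''
Split a string on each occurrence of the separator without using str.split.
Note that a trailing separator does not emit a trailing empty string.
>>> a__('apple#banana#cherry#orange', '#')
['apple', 'banana', 'cherry', 'orange']
>>> a__('Hello there')
['Hello', 'there']
'''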
__lowerCamelCase = []
__lowerCamelCase = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
__lowerCamelCase = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 622 | 1 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = """pixel_values"""
lowerCAmelCase__ = False
lowerCAmelCase__ = TimmBackboneConfig
def __init__( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , '''timm''' )
super().__init__(__UpperCAmelCase )
__lowerCamelCase = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(__UpperCAmelCase , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
pretrained = getattr(__UpperCAmelCase , '''use_pretrained_backbone''' , None )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
out_indices = config.out_indices if getattr(__UpperCAmelCase , '''out_indices''' , None ) is not None else (-1,)
__lowerCamelCase = timm.create_model(
config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **__UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCamelCase = self._backbone.return_layers
__lowerCamelCase = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__UpperCAmelCase )
@classmethod
def lowerCamelCase ( cls , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCamelCase = kwargs.pop('''config''' , TimmBackboneConfig() )
__lowerCamelCase = kwargs.pop('''use_timm_backbone''' , __UpperCAmelCase )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
__lowerCamelCase = kwargs.pop('''num_channels''' , config.num_channels )
__lowerCamelCase = kwargs.pop('''features_only''' , config.features_only )
__lowerCamelCase = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
__lowerCamelCase = kwargs.pop('''out_indices''' , config.out_indices )
__lowerCamelCase = TimmBackboneConfig(
backbone=__UpperCAmelCase , num_channels=__UpperCAmelCase , features_only=__UpperCAmelCase , use_pretrained_backbone=__UpperCAmelCase , out_indices=__UpperCAmelCase , )
return super()._from_config(__UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
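# timm models come fully initialized, so this weight-initialization hook is intentionally a no-op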
pass
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCamelCase = self._all_layers
__lowerCamelCase = self._backbone(__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = self._return_layers
__lowerCamelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCamelCase = self._backbone(__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = None
__lowerCamelCase = tuple(__UpperCAmelCase )
__lowerCamelCase = tuple(__UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
__lowerCamelCase = (feature_maps,)
if output_hidden_states:
__lowerCamelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__UpperCAmelCase , hidden_states=__UpperCAmelCase , attentions=__UpperCAmelCase )
| 622 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
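# embeddings of all ones vs. all twos make the inner-product ranking deterministic:
# a query of ones ranks doc '1' first, a query of -ones ranks doc '0' first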
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
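# the legacy index format stores one extra embedding dimension, hence retrieval_vector_size + 1 below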
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
len(__UpperCAmelCase ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase ) # check for doc token related keys in dictionary.
| 622 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """visual_bert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
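# dimensionality of the visual feature vectors fed in alongside the text embeddings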
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
| 622 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
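# declare all four axes of the NCHW pixel input as dynamic for the ONNX export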
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
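# absolute tolerance used when validating the ONNX export against the original model's outputs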
return 2E-3
| 622 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , '''neck_hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , '''num_attention_heads''' ) )
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=640 , __UpperCAmelCase=4 , __UpperCAmelCase="silu" , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = last_hidden_size
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = conv_kernel_size
__lowerCamelCase = output_stride
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = classifier_dropout_prob
__lowerCamelCase = use_labels
__lowerCamelCase = is_training
__lowerCamelCase = num_labels
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MobileViTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MobileViTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MobileViTForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowerCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MobileViTModelTester(self )
__lowerCamelCase = MobileViTConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = 5
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowerCamelCase = 2
for i in range(len(__UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = MobileViTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def a__ ( ):
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__UpperCAmelCase )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**__UpperCAmelCase )
# verify the logits
__lowerCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__lowerCamelCase = model.to(__UpperCAmelCase )
__lowerCamelCase = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**__UpperCAmelCase )
__lowerCamelCase = outputs.logits
# verify the logits
__lowerCamelCase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__lowerCamelCase = model.to(__UpperCAmelCase )
__lowerCamelCase = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**__UpperCAmelCase )
__lowerCamelCase = outputs.logits.detach().cpu()
__lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(50, 60)] )
__lowerCamelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__lowerCamelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
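# Illustrative sketch (added for clarity, not part of the test suite): the hidden-states
# test above relies on MobileViT halving the spatial resolution at each of its five stages,
# so the final divisor, halved, must equal the configured output_stride. Replaying that
# arithmetic with the tester defaults (image_size=32, output_stride=32):
def _mobilevit_stage_sizes(image_size=32, num_stages=5):
    divisor = 2
    sizes = []
    for _ in range(num_stages):
        sizes.append(image_size // divisor)  # spatial side length of this stage's feature map
        divisor *= 2
    return sizes, divisor // 2
assert _mobilevit_stage_sizes() == ([16, 8, 4, 2, 1], 32)  # 32 matches output_stride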
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """visual_bert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
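# Illustrative usage sketch (assumes the published transformers package, where the class
# above is exposed as VisualBertConfig): like any PretrainedConfig subclass it accepts
# keyword overrides and round-trips through save_pretrained / from_pretrained.
from transformers import VisualBertConfig
cfg = VisualBertConfig(visual_embedding_dim=1024)  # override one field, keep the defaults
cfg.save_pretrained("./visualbert-config")         # writes config.json into the directory
assert VisualBertConfig.from_pretrained("./visualbert-config").visual_embedding_dim == 1024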
| 622 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __lowerCAmelCase :
lowerCAmelCase__ = 42 # [batch_size x 3]
lowerCAmelCase__ = 42 # [batch_size x 3]
lowerCAmelCase__ = 42 # [batch_size x 3]
lowerCAmelCase__ = 42 # [batch_size x 3]
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def lowerCamelCase ( self ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowerCamelCase ( self ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = torch.arange(self.height * self.width )
__lowerCamelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(__UpperCAmelCase , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,*__lowerCamelCase = self.shape
__lowerCamelCase = int(np.prod(__UpperCAmelCase ) )
__lowerCamelCase = self.get_image_coords()
__lowerCamelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCamelCase = self.get_camera_rays(__UpperCAmelCase )
__lowerCamelCase = rays.view(__UpperCAmelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase ,*__lowerCamelCase ,__lowerCamelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCamelCase = coords.view(__UpperCAmelCase , -1 , 2 )
__lowerCamelCase = self.resolution()
__lowerCamelCase = self.fov()
__lowerCamelCase = (flat.float() / (res - 1)) * 2 - 1
__lowerCamelCase = fracs * torch.tan(fov / 2 )
__lowerCamelCase = fracs.view(__UpperCAmelCase , -1 , 2 )
__lowerCamelCase = (
self.z.view(__UpperCAmelCase , 1 , 3 )
+ self.x.view(__UpperCAmelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__UpperCAmelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCamelCase = directions / directions.norm(dim=-1 , keepdim=__UpperCAmelCase )
__lowerCamelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(__UpperCAmelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__UpperCAmelCase , *__UpperCAmelCase , 2 , 3 )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__UpperCAmelCase , height=__UpperCAmelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def a__ ( _UpperCamelCase : int ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
__lowerCamelCase = np.array([np.sin(_UpperCamelCase ), np.cos(_UpperCamelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCamelCase = -z * 4
__lowerCamelCase = np.array([np.cos(_UpperCamelCase ), -np.sin(_UpperCamelCase ), 0.0] )
__lowerCamelCase = np.cross(_UpperCamelCase ,_UpperCamelCase )
origins.append(_UpperCamelCase )
xs.append(_UpperCamelCase )
ys.append(_UpperCamelCase )
zs.append(_UpperCamelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_UpperCamelCase ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(_UpperCamelCase ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(_UpperCamelCase ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(_UpperCamelCase ,axis=0 ) ).float() ,width=_UpperCamelCase ,height=_UpperCamelCase ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(_UpperCamelCase )) ,)
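# Numerical sanity sketch (illustration only; the cross-product order y = cross(z, x) is
# assumed, matching the usual pan-camera construction): for each theta above, z is
# unit-length after normalization, the origin sits at radius 4 opposite z, and x, y, z
# form an orthogonal frame.
import numpy as np
theta = 0.3
z = np.array([np.sin(theta), np.cos(theta), -0.5])
z /= np.sqrt(np.sum(z**2))
origin = -z * 4
x = np.array([np.cos(theta), -np.sin(theta), 0.0])
y = np.cross(z, x)
assert abs(np.linalg.norm(z) - 1.0) < 1e-9 and abs(np.linalg.norm(origin) - 4.0) < 1e-9
assert abs(np.dot(x, z)) < 1e-9 and abs(np.dot(y, z)) < 1e-9 and abs(np.dot(x, y)) < 1e-9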
| 622 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """spiece.model"""}
a_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a_ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a_ = """▁"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
# Mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
__lowerCamelCase = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
__lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
__lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.preprocess_text(__UpperCAmelCase )
__lowerCamelCase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
__lowerCamelCase = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__lowerCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
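# Pure-Python sketch of the special-token layout the methods above produce (token ids are
# hypothetical; ALBERT uses [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for pairs):
cls_id, sep_id = 2, 3                  # stand-in ids for [CLS] and [SEP]
a, b = [10, 11, 12], [20, 21]          # stand-in ids for the two sequences
pair = [cls_id] + a + [sep_id] + b + [sep_id]
segment_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert pair == [2, 10, 11, 12, 3, 20, 21, 3]
assert segment_ids == [0, 0, 0, 0, 0, 1, 1, 1]  # mirrors create_token_type_ids_from_sequences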
| 622 | 1 |
# using dfs for finding eulerian path traversal
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : Union[str, Any]=None ):
__lowerCamelCase = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
__lowerCamelCase ,__lowerCamelCase = True, True
__lowerCamelCase = dfs(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
return path
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = 0
__lowerCamelCase = -1
for i in range(_UpperCamelCase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
__lowerCamelCase = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Optional[int] ):
__lowerCamelCase = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
__lowerCamelCase ,__lowerCamelCase = check_circuit_or_path(_UpperCamelCase ,_UpperCamelCase )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
__lowerCamelCase = 1
if check == 2:
__lowerCamelCase = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
__lowerCamelCase = dfs(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
print(_UpperCamelCase )
def a__ ( ):
__lowerCamelCase = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
__lowerCamelCase = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
__lowerCamelCase = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
__lowerCamelCase = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
__lowerCamelCase = {
1: [],
2: []
# all degrees are zero
}
__lowerCamelCase = 10
check_euler(_UpperCamelCase ,_UpperCamelCase )
check_euler(_UpperCamelCase ,_UpperCamelCase )
check_euler(_UpperCamelCase ,_UpperCamelCase )
check_euler(_UpperCamelCase ,_UpperCamelCase )
check_euler(_UpperCamelCase ,_UpperCamelCase )
if __name__ == "__main__":
main()
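# Worked parity check (illustration): check_circuit_or_path above implements Euler's rule --
# zero odd-degree vertices means an Euler cycle, exactly two means an Euler path. In the
# first example graph, vertices 1 (degree 3) and 5 (degree 1) are the only odd ones:
g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd_vertices = [v for v, nbrs in g1.items() if len(nbrs) % 2 == 1]
assert odd_vertices == [1, 5]  # two odd-degree vertices -> Euler path (check == 2 above)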
| 622 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ = """true"""
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[str]=82 ,_UpperCamelCase : Optional[Any]=16 ):
set_seed(42 )
__lowerCamelCase = RegressionModel()
__lowerCamelCase = deepcopy(_UpperCamelCase )
__lowerCamelCase = RegressionDataset(length=_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,batch_size=_UpperCamelCase )
model.to(accelerator.device )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return model, ddp_model, dataloader
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : str=False ):
__lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ,split='''validation''' )
def tokenize_function(_UpperCamelCase : int ):
__lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase )
return outputs
with accelerator.main_process_first():
__lowerCamelCase = dataset.map(
_UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
__lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(_UpperCamelCase : Any ):
if use_longest:
return tokenizer.pad(_UpperCamelCase ,padding='''longest''' ,return_tensors='''pt''' )
return tokenizer.pad(_UpperCamelCase ,padding='''max_length''' ,max_length=1_28 ,return_tensors='''pt''' )
return DataLoader(_UpperCamelCase ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=16 )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : List[str] ):
__lowerCamelCase = Accelerator(dispatch_batches=_UpperCamelCase ,split_batches=_UpperCamelCase )
__lowerCamelCase = get_dataloader(_UpperCamelCase ,not dispatch_batches )
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' ,return_dict=_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = []
for batch in dataloader:
__lowerCamelCase ,__lowerCamelCase = batch.values()
with torch.no_grad():
__lowerCamelCase = model(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__lowerCamelCase ,__lowerCamelCase = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCamelCase )
targs.append(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = torch.cat(_UpperCamelCase ), torch.cat(_UpperCamelCase )
return logits, targs
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : List[Any]=82 ,_UpperCamelCase : str=False ,_UpperCamelCase : List[str]=False ,_UpperCamelCase : Optional[int]=16 ):
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = get_basic_setup(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = generate_predictions(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
assert (
len(_UpperCamelCase ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCamelCase )}"""
def a__ ( _UpperCamelCase : bool = False ,_UpperCamelCase : bool = False ):
__lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' )
__lowerCamelCase ,__lowerCamelCase = get_mrpc_setup(_UpperCamelCase ,_UpperCamelCase )
# First do baseline
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''no''']
model.to(_UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCamelCase )
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCamelCase ,references=batch['''labels'''] )
__lowerCamelCase = metric.compute()
# Then do distributed
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase = batch['''labels''']
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCamelCase ,references=_UpperCamelCase )
__lowerCamelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def a__ ( ):
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_UpperCamelCase ,_UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__lowerCamelCase = Accelerator()
test_torch_metrics(_UpperCamelCase ,5_12 )
accelerator.state._reset_state()
def a__ ( _UpperCamelCase : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
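# Back-of-the-envelope sketch (illustration only, with an assumed world size of 2) of why
# gather_for_metrics is needed: even batch sharding pads the last batch on every process,
# so a naive gather over-counts, and the duplicates must be dropped to recover the true
# dataset length that the `len(logits) == num_samples` assertion above checks.
world_size, dataset_len, batch_size = 2, 82, 16
per_process = -(-dataset_len // world_size)           # ceil(82 / 2) = 41 samples each
padded = -(-per_process // batch_size) * batch_size   # padded up to 48 samples per process
assert padded * world_size - dataset_len == 14        # 14 duplicated samples to drop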
| 622 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a_ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
lowerCAmelCase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowerCAmelCase__ = field(
default=lowerCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase__ = field(
default=lowerCAmelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCAmelCase__ = field(
default=lowerCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowerCAmelCase__ = field(default=lowerCAmelCase__ , metadata={"""help""": """Whether to freeze the encoder."""} )
lowerCAmelCase__ = field(default=lowerCAmelCase__ , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class __lowerCAmelCase :
lowerCAmelCase__ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase__ = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
lowerCAmelCase__ = field(
default=1_0_2_4 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase__ = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase__ = field(
default=1_4_2 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
lowerCAmelCase__ = field(
default=1_4_2 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase__ = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
lowerCAmelCase__ = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
lowerCAmelCase__ = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
lowerCAmelCase__ = field(default=lowerCAmelCase__ , metadata={"""help""": """Source language id for translation."""} )
lowerCAmelCase__ = field(default=lowerCAmelCase__ , metadata={"""help""": """Target language id for translation."""} )
lowerCAmelCase__ = field(default=lowerCAmelCase__ , metadata={"""help""": """# num_beams to use for evaluation."""} )
lowerCAmelCase__ = field(
default=lowerCAmelCase__ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[int] ):
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(_UpperCamelCase ,os.path.join(_UpperCamelCase ,F"""{split}_results.json""" ) )
def a__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' ,_UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
__lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ):
assert hasattr(_UpperCamelCase ,_UpperCamelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_UpperCamelCase ,_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path ,from_tf='''.ckpt''' in model_args.model_name_or_path ,config=_UpperCamelCase ,cache_dir=model_args.cache_dir ,)
# use task specific params
use_task_specific_params(_UpperCamelCase ,data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__lowerCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCamelCase ,(MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__lowerCamelCase = SeqaSeqDataset
# Get datasets
__lowerCamelCase = (
dataset_class(
_UpperCamelCase ,type_path='''train''' ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '''''' ,)
if training_args.do_train
else None
)
__lowerCamelCase = (
dataset_class(
_UpperCamelCase ,type_path='''val''' ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '''''' ,)
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__lowerCamelCase = (
dataset_class(
_UpperCamelCase ,type_path='''test''' ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or '''''' ,)
if training_args.do_predict
else None
)
# Initialize our Trainer
__lowerCamelCase = (
build_compute_metrics_fn(data_args.task ,_UpperCamelCase ) if training_args.predict_with_generate else None
)
__lowerCamelCase = SeqaSeqTrainer(
model=_UpperCamelCase ,args=_UpperCamelCase ,data_args=_UpperCamelCase ,train_dataset=_UpperCamelCase ,eval_dataset=_UpperCamelCase ,data_collator=SeqaSeqDataCollator(
_UpperCamelCase ,_UpperCamelCase ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=_UpperCamelCase ,tokenizer=_UpperCamelCase ,)
__lowerCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__lowerCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__lowerCamelCase = train_result.metrics
__lowerCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' ,_UpperCamelCase ,training_args.output_dir )
all_metrics.update(_UpperCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,'''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
__lowerCamelCase = data_args.n_val
__lowerCamelCase = round(metrics['''val_loss'''] ,4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' ,_UpperCamelCase ,training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__lowerCamelCase = trainer.predict(test_dataset=_UpperCamelCase ,metric_key_prefix='''test''' )
__lowerCamelCase = test_output.metrics
__lowerCamelCase = data_args.n_test
if trainer.is_world_process_zero():
__lowerCamelCase = round(metrics['''test_loss'''] ,4 )
handle_metrics('''test''' ,_UpperCamelCase ,training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.predict_with_generate:
__lowerCamelCase = tokenizer.batch_decode(
test_output.predictions ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
__lowerCamelCase = lmap(str.strip ,_UpperCamelCase )
write_txt_file(_UpperCamelCase ,os.path.join(training_args.output_dir ,'''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(_UpperCamelCase ,os.path.join(training_args.output_dir ,'''all_results.json''' ) )
return all_metrics
def a__ ( _UpperCamelCase : int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
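# Minimal sketch (illustrative dataclass, not taken from this script) of the
# HfArgumentParser pattern used in main() above: every dataclass field becomes a --flag,
# and parsing returns one populated instance per dataclass.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class _DemoArgs:
    data_dir: str = field(metadata={"help": "Path to the dataset."})
    n_train: int = field(default=-1, metadata={"help": "-1 means use all examples."})

(demo_args,) = HfArgumentParser(_DemoArgs).parse_args_into_dataclasses(
    args=["--data_dir", "./data", "--n_train", "100"]
)
assert demo_args.data_dir == "./data" and demo_args.n_train == 100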
| 622 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCamelCase = CLIPTextModel(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = CLIPTextModelWithProjection(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__lowerCamelCase = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = sd_pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# forward without prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = negative_prompt
__lowerCamelCase = 3 * [inputs['''prompt''']]
__lowerCamelCase = sd_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
__lowerCamelCase = sd_pipe(
**__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_inputs(__UpperCAmelCase )
__lowerCamelCase = pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
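# Small sketch (illustration) of the device-aware seeding idiom in get_dummy_inputs above:
# at the time these tests were written, MPS did not support a device-bound torch.Generator,
# so the tests fall back to the global CPU generator on Apple silicon.
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)

assert isinstance(make_generator("cpu"), torch.Generator)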
| 622 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
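# Conceptual sketch (a toy stand-in, not transformers' actual _LazyModule) of the pattern
# above: attribute access resolves the symbol from its submodule on demand, so importing
# the package stays cheap until something is actually used.
import importlib

class _TinyLazyModule:
    def __init__(self, import_structure):
        self._structure = import_structure  # e.g. {"json": ["dumps"]}
    def __getattr__(self, name):
        for module_name, symbols in self._structure.items():
            if name in symbols:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(name)

lazy = _TinyLazyModule({"json": ["dumps"]})
assert lazy.dumps({"ok": True}) == '{"ok": true}'  # resolved only on first attribute access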
| 622 |
import torch
from diffusers import StableDiffusionPipeline
a_ = """path-to-your-trained-model"""
a_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
a_ = """A photo of sks dog in a bucket"""
a_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
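# Optional reproducibility sketch (illustration): diffusers pipelines also accept a seeded
# generator, so the same prompt reproduces the same image across runs.
generator = torch.Generator(device="cuda").manual_seed(0)
reproducible = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
reproducible.save("dog-bucket-seed0.png")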
| 622 | 1 |
def a__ ( _UpperCamelCase : int = 50_00_00_00 ):
__lowerCamelCase = set()
__lowerCamelCase = int((limit - 24) ** (1 / 2) )
__lowerCamelCase = set(range(3 ,prime_square_limit + 1 ,2 ) )
primes.add(2 )
for p in range(3 ,prime_square_limit + 1 ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,_UpperCamelCase ) ) )
for primea in primes:
__lowerCamelCase = primea * primea
for primea in primes:
__lowerCamelCase = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__lowerCamelCase = primea * primea * primea * primea
__lowerCamelCase = square + cube + tetr
if total >= limit:
break
ret.add(_UpperCamelCase )
return len(_UpperCamelCase )
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __lowerCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
pass
def a__ ( _UpperCamelCase : List[str] ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
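# Usage sketch (illustration; passing None as the middle `lang` argument is an assumption
# inferred from the zipped call inside the test class below): apply_tesseract OCRs an image
# into parallel lists of words and normalized boxes, which zip into the `word_boxes` input.
# words, boxes = apply_tesseract(load_image(INVOICE_URL), None, "")
# word_boxes = list(zip(words, boxes))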
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
__lowerCamelCase = '''What is the placebo?'''
__lowerCamelCase = [
{
'''image''': load_image(__UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = dqa_pipeline(__UpperCAmelCase , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''How many cats are there?'''
__lowerCamelCase = [
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
# No text at all is detected in this image, so layoutlmv2 should fail
# and return an empty answer
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
# We can optionally pass the words and bounding boxes directly
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
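# Illustrative sketch (not part of the original test file): minimal standalone use of the
# document-question-answering pipeline exercised above. The checkpoint is one the tests use;
# running this needs torch, Pillow and pytesseract installed, plus network access.
if __name__ == "__main__":
    from transformers import pipeline

    dqa = pipeline('''document-question-answering''' , model='''impira/layoutlm-document-qa''' )
    # INVOICE_URL above points at a sample invoice image; any image path or URL works here.
    result = dqa(image=INVOICE_URL , question='''What is the invoice number?''' , top_k=1 )
    print(result )  # e.g. [{'score': 0.42, 'answer': 'us-001', 'start': 16, 'end': 16}]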
| 622 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''[PAD]'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__UpperCAmelCase ) , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [35389, 6672, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
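# Illustrative sketch (assumption: sentencepiece installed and network access available):
# the same round trip the slow test above asserts token by token.
if __name__ == "__main__":
    tok = XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
    ids = tok.encode('''Hello World!''' )
    print(ids )  # [35389, 6672, 49, 2], matching the slow test above
    print(tok.decode(ids , skip_special_tokens=True ) )  # round-trips to "Hello World!"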
| 622 | 1 |
def a__ ( _UpperCamelCase : int ):
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
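# Worked example: 28 is perfect because its proper divisors 1 + 2 + 4 + 7 + 14 sum to 28;
# scanning up to number // 2 suffices since no proper divisor of n can exceed n // 2.
# e.g. a__(6) -> True, a__(27) -> False.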
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
a_ = int(input("""Enter number: """).strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 622 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def a__ ( _UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = None
# source code of `config_class`
__lowerCamelCase = inspect.getsource(_UpperCamelCase )
__lowerCamelCase = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowerCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase = ckpt_name
break
return checkpoint
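# Illustrative sketch (hypothetical docstring, stdlib only) of what the regex above extracts:
#
#     doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for weights."
#     _re_checkpoint.findall(doc)
#     # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]
#
# Each (name, link) pair is what the loop in the function above validates.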
def a__ ( ):
__lowerCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowerCamelCase = get_checkpoint_from_config_class(_UpperCamelCase )
__lowerCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 622 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
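# Design note (illustrative, not from the original file): with the lazy-module pattern above,
# `import transformers` stays cheap -- the torch, TF and vision backends are only imported
# when an attribute such as EfficientFormerModel is first accessed, and each try/except guard
# registers nothing instead of failing at import time when a backend is missing. E.g.:
#
#     from transformers import EfficientFormerConfig   # triggers no torch/TF import
#     from transformers import TFEfficientFormerModel  # imports TF on first access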
| 622 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """table-transformer"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=3 , __UpperCAmelCase=100 , __UpperCAmelCase=6 , __UpperCAmelCase=2048 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=2048 , __UpperCAmelCase=8 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase="sine" , __UpperCAmelCase="resnet50" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , **__UpperCAmelCase , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = backbone_config.get('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(__UpperCAmelCase )
# set timm attributes to None
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None, None, None
__lowerCamelCase = use_timm_backbone
__lowerCamelCase = backbone_config
__lowerCamelCase = num_channels
__lowerCamelCase = num_queries
__lowerCamelCase = d_model
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = encoder_layers
__lowerCamelCase = auxiliary_loss
__lowerCamelCase = position_embedding_type
__lowerCamelCase = backbone
__lowerCamelCase = use_pretrained_backbone
__lowerCamelCase = dilation
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = mask_loss_coefficient
__lowerCamelCase = dice_loss_coefficient
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self.d_model
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 1E-5
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 12
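# Illustrative sketch (the classes above are anonymized; their real transformers names are
# TableTransformerConfig and TableTransformerOnnxConfig):
#
#     from transformers import TableTransformerConfig
#     cfg = TableTransformerConfig()       # defaults: 100 object queries, d_model 256
#     print(cfg.num_queries, cfg.d_model)  # 100 256
#     print(cfg.num_attention_heads)       # 8, via the attribute_map alias defined above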
| 622 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
__lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
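# Illustrative sketch (assumption: rjieba installed and network access available) of the
# Chinese pre-tokenization the tests above assert:
if __name__ == "__main__":
    tok = RoFormerTokenizer.from_pretrained('''junnyu/roformer_chinese_base''' )
    print(tok.tokenize('''永和服装饰品有限公司''' ) )  # ['永和', '服装', '饰品', '有限公司']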
| 622 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
a_ = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """upernet"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , __UpperCAmelCase=[1, 2, 3, 6] , __UpperCAmelCase=True , __UpperCAmelCase=0.4 , __UpperCAmelCase=384 , __UpperCAmelCase=256 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=255 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = backbone_config.get('''model_type''' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(__UpperCAmelCase )
__lowerCamelCase = backbone_config
__lowerCamelCase = hidden_size
__lowerCamelCase = initializer_range
__lowerCamelCase = pool_scales
__lowerCamelCase = use_auxiliary_head
__lowerCamelCase = auxiliary_loss_weight
__lowerCamelCase = auxiliary_in_channels
__lowerCamelCase = auxiliary_channels
__lowerCamelCase = auxiliary_num_convs
__lowerCamelCase = auxiliary_concat_input
__lowerCamelCase = loss_ignore_index
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
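# Illustrative sketch (the class above is anonymized; its real transformers name is
# UperNetConfig):
#
#     from transformers import UperNetConfig
#     cfg = UperNetConfig()
#     d = cfg.to_dict()
#     print(d["model_type"], d["backbone_config"]["model_type"])  # upernet resnet
#
# The custom to_dict above is what makes the nested backbone config JSON-serializable.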
| 622 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
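# Design note (illustrative, not from the original file): every call above re-seeds the
# generator with manual_seed(0), so outputs stay comparable across the save/load round trip,
# while the loose 1e-1 tolerance on the slice checks absorbs fp16 nondeterminism on GPU.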
| 622 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """roberta-prelayernorm"""
def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class __lowerCAmelCase ( lowerCAmelCase__ ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ = getLogger(__name__)
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
__lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
__lowerCamelCase = str(_UpperCamelCase )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCamelCase = model.half()
__lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
if prefix is None:
__lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
__lowerCamelCase = [prefix + text for text in examples_chunk]
__lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
__lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowerCamelCase = int(time.time() - start_time ) # seconds
__lowerCamelCase = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
parser.add_argument(
'''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
__lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowerCamelCase = generate_summaries_or_translations(
_UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**_UpperCamelCase ,)
if args.reference_path is None:
return {}
# Compute scores
__lowerCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
__lowerCamelCase = score_fn(_UpperCamelCase ,_UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
__lowerCamelCase = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase ,open(args.score_path ,'''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
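    # Usage for summarization (illustrative; paths are placeholders, mirroring the MT example above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 8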
run_generate(verbose=True)
| 622 | 1 |
from itertools import count
def a__ ( _UpperCamelCase : int = 50 ):
__lowerCamelCase = [1] * min_block_length
for n in count(_UpperCamelCase ):
fill_count_functions.append(1 )
for block_length in range(_UpperCamelCase ,n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
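# Illustrative walk-through (Project Euler 114/115 block filling): fill_count_functions[n]
# counts the ways to place blocks of length >= min_block_length, separated by at least one
# empty cell, into a row of n cells, including the all-empty row. Per the Project Euler 114
# statement, a row of 7 cells with minimum block length 3 admits exactly 17 such fills, and
# the loop above returns the first n whose count exceeds one million.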
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : Any=None ):
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(_UpperCamelCase ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = embed_dim
__lowerCamelCase = word_embed_proj_dim
__lowerCamelCase = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCAmelCase , **self.config_updates , )
__lowerCamelCase = prepare_opt_inputs_dict(__UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel(config=__UpperCAmelCase )
__lowerCamelCase = inputs_dict['''input_ids''']
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
# create a hypothetical next token and extend it to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embedding weights if they do not exist yet.
# Then we retry getting the attribute once built.
model.build()
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCamelCase = model_class(config=__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# resize the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
__lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
__lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
return tf.constant(_UpperCamelCase ,dtype=tf.intaa )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCamelCase = input_ids.shape[0]
__lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowerCamelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCamelCase = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCamelCase = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
__lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-3 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = '''facebook/opt-350m'''
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCamelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-125m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = '''left'''
# use different length sentences to test batching
__lowerCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
__lowerCamelCase = inputs['''input_ids''']
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] )
__lowerCamelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase )
__lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowerCamelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
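# Design note (illustrative, not from the original file): decoder-only models such as OPT
# must be left-padded for batched generation, which is why the batching test above sets the
# tokenizer's padding side to "left". With right padding, pad tokens would sit between the
# prompt and the newly generated tokens and corrupt the continuations. E.g.:
#
#     tok = GPTaTokenizer.from_pretrained('''facebook/opt-350m''')
#     tok.padding_side = '''left'''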
| 622 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[Any]=() ,_UpperCamelCase : Dict=None ,_UpperCamelCase : List[Any]="no" ,_UpperCamelCase : Dict="29500" ):
__lowerCamelCase = False
__lowerCamelCase = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
__lowerCamelCase = True
elif "IPython" in sys.modules:
__lowerCamelCase = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
__lowerCamelCase = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' ,_UpperCamelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
__lowerCamelCase = 8
__lowerCamelCase = PrepareForLaunch(_UpperCamelCase ,distributed_type='''TPU''' )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCamelCase ,args=_UpperCamelCase ,nprocs=_UpperCamelCase ,start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*_UpperCamelCase )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
                world_size=_UpperCamelCase ,master_addr='''127.0.0.1''' ,master_port=_UpperCamelCase ,mixed_precision=_UpperCamelCase ):
__lowerCamelCase = PrepareForLaunch(_UpperCamelCase ,distributed_type='''MULTI_GPU''' )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCamelCase ,args=_UpperCamelCase ,nprocs=_UpperCamelCase ,start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__lowerCamelCase = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*_UpperCamelCase )
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=() ,_UpperCamelCase : Optional[int]=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=_UpperCamelCase ,master_addr='''127.0.0.1''' ,master_port='''29500''' ,accelerate_mixed_precision='''no''' ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu='''yes''' ,):
__lowerCamelCase = PrepareForLaunch(_UpperCamelCase ,debug=_UpperCamelCase )
start_processes(_UpperCamelCase ,args=_UpperCamelCase ,nprocs=_UpperCamelCase ,start_method='''fork''' )
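# Editor's note: a minimal usage sketch for the launcher above. It mirrors
# accelerate's public `notebook_launcher`; the training function and its
# argument tuple here are hypothetical placeholders.
from accelerate import notebook_launcher

def training_function(learning_rate):
    # real code would build an Accelerator *inside* this function, as the
    # ValueError messages above require
    print(f"training with lr={learning_rate}")

# num_processes=1 runs in-process on CPU/GPU/MPS; values > 1 fork one worker
# per GPU via torch.multiprocessing's "fork" start method.
notebook_launcher(training_function, args=(3e-4,), num_processes=1)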
| 622 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ = logging.getLogger(__name__)
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ):
__lowerCamelCase = np.argmax(_UpperCamelCase ,axis=1 )
return np.sum(outputs == labels )
def a__ ( _UpperCamelCase : Optional[int] ):
with open(_UpperCamelCase ,encoding='''utf_8''' ) as f:
__lowerCamelCase = csv.reader(_UpperCamelCase )
__lowerCamelCase = []
next(_UpperCamelCase ) # skip the first line
for line in tqdm(_UpperCamelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Dict ):
__lowerCamelCase = []
for dataset in encoded_datasets:
__lowerCamelCase = len(_UpperCamelCase )
__lowerCamelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa )
__lowerCamelCase = np.zeros((n_batch, 2) ,dtype=np.intaa )
__lowerCamelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.intaa )
__lowerCamelCase = np.zeros((n_batch,) ,dtype=np.intaa )
        for i, (story, conta, contb, mc_label) in enumerate(_UpperCamelCase ):
            __lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            __lowerCamelCase = with_conta
            __lowerCamelCase = with_contb
            __lowerCamelCase = len(with_conta ) - 1
            __lowerCamelCase = len(with_contb ) - 1
            __lowerCamelCase = with_conta
            __lowerCamelCase = with_contb
__lowerCamelCase = mc_label
__lowerCamelCase = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' ,type=_UpperCamelCase ,default='''openai-gpt''' ,help='''pretrained model name''' )
parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,)
parser.add_argument('''--train_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--eval_dataset''' ,type=_UpperCamelCase ,default='''''' )
parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 )
parser.add_argument('''--num_train_epochs''' ,type=_UpperCamelCase ,default=3 )
parser.add_argument('''--train_batch_size''' ,type=_UpperCamelCase ,default=8 )
parser.add_argument('''--eval_batch_size''' ,type=_UpperCamelCase ,default=16 )
parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=_UpperCamelCase ,help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' ,type=_UpperCamelCase ,default=1 )
parser.add_argument(
'''--max_steps''' ,default=-1 ,type=_UpperCamelCase ,help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) ,)
parser.add_argument(
'''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,)
parser.add_argument('''--learning_rate''' ,type=_UpperCamelCase ,default=6.25e-5 )
parser.add_argument('''--warmup_steps''' ,default=0 ,type=_UpperCamelCase ,help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' ,type=_UpperCamelCase ,default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' ,type=_UpperCamelCase ,default=0.01 )
parser.add_argument('''--lm_coef''' ,type=_UpperCamelCase ,default=0.9 )
parser.add_argument('''--n_valid''' ,type=_UpperCamelCase ,default=3_74 )
parser.add_argument('''--server_ip''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' )
__lowerCamelCase = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowerCamelCase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase ,_UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowerCamelCase = ['''_start_''', '''_delimiter_''', '''_classify_''']
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCamelCase )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCamelCase ) )
model.to(_UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCamelCase : Dict ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) )
elif isinstance(_UpperCamelCase ,_UpperCamelCase ):
return obj
return [tokenize_and_encode(_UpperCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__lowerCamelCase = load_rocstories_dataset(args.train_dataset )
__lowerCamelCase = load_rocstories_dataset(args.eval_dataset )
__lowerCamelCase = (train_dataset, eval_dataset)
__lowerCamelCase = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
__lowerCamelCase = model.config.n_positions // 2 - 2
__lowerCamelCase = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
__lowerCamelCase = min(_UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowerCamelCase = pre_process_datasets(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,*_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = tensor_datasets[0], tensor_datasets[1]
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = RandomSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.train_batch_size )
__lowerCamelCase = TensorDataset(*_UpperCamelCase )
__lowerCamelCase = SequentialSampler(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowerCamelCase = args.max_steps
__lowerCamelCase = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
__lowerCamelCase = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowerCamelCase = list(model.named_parameters() )
__lowerCamelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__lowerCamelCase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__lowerCamelCase = AdamW(_UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon )
__lowerCamelCase = get_linear_schedule_with_warmup(
_UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=_UpperCamelCase )
if args.do_train:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ):
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = tqdm(_UpperCamelCase ,desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
__lowerCamelCase = model(_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowerCamelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__lowerCamelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase ,scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowerCamelCase = model.module if hasattr(_UpperCamelCase ,'''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
__lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase )
torch.save(model_to_save.state_dict() ,_UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
__lowerCamelCase ,__lowerCamelCase = 0, 0
__lowerCamelCase ,__lowerCamelCase = 0, 0
for batch in tqdm(_UpperCamelCase ,desc='''Evaluating''' ):
__lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch
with torch.no_grad():
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model(
_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase )
__lowerCamelCase = mc_logits.detach().cpu().numpy()
__lowerCamelCase = mc_labels.to('''cpu''' ).numpy()
__lowerCamelCase = accuracy(_UpperCamelCase ,_UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowerCamelCase = eval_loss / nb_eval_steps
__lowerCamelCase = eval_accuracy / nb_eval_examples
__lowerCamelCase = tr_loss / nb_tr_steps if args.do_train else None
__lowerCamelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__lowerCamelCase = os.path.join(args.output_dir ,'''eval_results.txt''' )
with open(_UpperCamelCase ,'''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' ,_UpperCamelCase ,str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
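# Editor's note: a self-contained sketch of the input layout that
# pre_process_datasets builds for the double-heads model -- one row per story,
# two candidate endings per row, with the position of the classification token
# recorded separately. All token ids below are made up for illustration.
import numpy as np

start, delim, clf = 40478, 40479, 40480          # ids of the three added special tokens
story, cont1, cont2 = [10, 11], [12], [13, 14]
with_cont1 = [start] + story + [delim] + cont1 + [clf]
with_cont2 = [start] + story + [delim] + cont2 + [clf]
input_len = max(len(with_cont1), len(with_cont2))
input_ids = np.zeros((1, 2, input_len), dtype=np.int64)
input_ids[0, 0, : len(with_cont1)] = with_cont1
input_ids[0, 1, : len(with_cont2)] = with_cont2
mc_token_ids = np.array([[len(with_cont1) - 1, len(with_cont2) - 1]])  # where clf sits
print(input_ids)
print(mc_token_ids)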
| 622 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
def a__ ( _UpperCamelCase : np.ndarray ,_UpperCamelCase : Union[int, Iterable[int]] ,_UpperCamelCase : bool ,_UpperCamelCase : int ):
def constraint_to_multiple_of(_UpperCamelCase : List[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : str=0 ,_UpperCamelCase : List[Any]=None ):
__lowerCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__lowerCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
__lowerCamelCase = math.ceil(val / multiple ) * multiple
return x
__lowerCamelCase = (output_size, output_size) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else output_size
__lowerCamelCase ,__lowerCamelCase = get_image_size(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = output_size
# determine new height and width
__lowerCamelCase = output_height / input_height
__lowerCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__lowerCamelCase = scale_width
else:
# fit height
__lowerCamelCase = scale_height
__lowerCamelCase = constraint_to_multiple_of(scale_height * input_height ,multiple=_UpperCamelCase )
__lowerCamelCase = constraint_to_multiple_of(scale_width * input_width ,multiple=_UpperCamelCase )
return (new_height, new_width)
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = ["""pixel_values"""]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = False , __UpperCAmelCase = 1 , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__lowerCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
__lowerCamelCase = get_size_dict(__UpperCAmelCase )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = keep_aspect_ratio
__lowerCamelCase = ensure_multiple_of
__lowerCamelCase = resample
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = 1 , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCamelCase = get_resize_output_image_size(
__UpperCAmelCase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=__UpperCAmelCase , multiple=__UpperCAmelCase , )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(__UpperCAmelCase )
__lowerCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__lowerCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__lowerCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__UpperCAmelCase ):
__lowerCamelCase = target_sizes.numpy()
__lowerCamelCase = []
for idx in range(len(__UpperCAmelCase ) ):
__lowerCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__UpperCAmelCase )
__lowerCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCAmelCase )
else:
__lowerCamelCase = logits.argmax(dim=1 )
__lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
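# Editor's note: a standalone sketch of the resize arithmetic above -- keep the
# aspect ratio by scaling as little as possible, then snap both sides to a
# multiple of `ensure_multiple_of` (1 by default here; DPT checkpoints commonly
# configure 32). Input/output sizes below are illustrative.
def snap(val, multiple):
    # nearest multiple, as constraint_to_multiple_of does in the simple case
    return round(val / multiple) * multiple

in_h, in_w, out = 480, 640, 384
scale_h, scale_w = out / in_h, out / in_w
scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h  # scale as little as possible
print(snap(scale * in_h, 32), snap(scale * in_w, 32))  # 384 512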
| 622 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1024 , __UpperCAmelCase=1024 , __UpperCAmelCase=3.6 ):
'''simple docstring'''
__lowerCamelCase = tokenizer
__lowerCamelCase = tokenizer.bos_token_id
__lowerCamelCase = dataset
__lowerCamelCase = seq_length
__lowerCamelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
'''simple docstring'''
__lowerCamelCase = iter(self.dataset )
__lowerCamelCase = True
while more_examples:
__lowerCamelCase ,__lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__UpperCAmelCase )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
__lowerCamelCase = False
break
__lowerCamelCase = tokenizer(__UpperCAmelCase , truncation=__UpperCAmelCase )['''input_ids''']
__lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__UpperCAmelCase ) , self.seq_length ):
__lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(__UpperCAmelCase ) == self.seq_length:
yield torch.tensor(__UpperCAmelCase )
def a__ ( _UpperCamelCase : List[Any] ):
__lowerCamelCase = {'''streaming''': True}
__lowerCamelCase = load_dataset(args.dataset_name ,split='''train''' ,**_UpperCamelCase )
__lowerCamelCase = ConstantLengthDataset(_UpperCamelCase ,_UpperCamelCase ,seq_length=args.seq_length )
__lowerCamelCase = DataLoader(_UpperCamelCase ,batch_size=args.batch_size )
return eval_dataloader
def a__ ( _UpperCamelCase : str ):
model.eval()
__lowerCamelCase = []
for step, batch in enumerate(_UpperCamelCase ):
with torch.no_grad():
__lowerCamelCase = model(_UpperCamelCase ,labels=_UpperCamelCase )
__lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__lowerCamelCase = torch.mean(torch.cat(_UpperCamelCase ) )
try:
__lowerCamelCase = torch.exp(_UpperCamelCase )
except OverflowError:
__lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
a_ = Accelerator()
# Parse configuration
a_ = HfArgumentParser(EvaluationArguments)
a_ = parser.parse_args()
set_seed(args.seed)
# Logging
a_ = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
a_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
a_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
a_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
a_ , a_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
a_ , a_ = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 622 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
a_ = TypeVar("""T""")
a_ = TypeVar("""U""")
class __lowerCAmelCase ( Generic[T, U] ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = key
__lowerCamelCase = val
__lowerCamelCase = None
__lowerCamelCase = None
def __repr__( self ):
'''simple docstring'''
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class __lowerCAmelCase ( Generic[T, U] ):
def __init__( self ):
'''simple docstring'''
__lowerCamelCase = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = self.rear, self.head
def __repr__( self ):
'''simple docstring'''
__lowerCamelCase = ['''DoubleLinkedList''']
__lowerCamelCase = self.head
while node.next is not None:
rep.append(str(__UpperCAmelCase ) )
__lowerCamelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__lowerCamelCase = node
__lowerCamelCase = previous
__lowerCamelCase = node
__lowerCamelCase = self.rear
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__lowerCamelCase = node.next
__lowerCamelCase = node.prev
__lowerCamelCase = None
__lowerCamelCase = None
return node
class __lowerCAmelCase ( Generic[T, U] ):
lowerCAmelCase__ = {}
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = DoubleLinkedList()
__lowerCamelCase = capacity
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = {}
def __repr__( self ):
'''simple docstring'''
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , __UpperCAmelCase ):
'''simple docstring'''
return key in self.cache
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
__lowerCamelCase = self.cache[key]
__lowerCamelCase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__UpperCAmelCase )
return node.val
self.miss += 1
return None
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__lowerCamelCase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__lowerCamelCase = DoubleLinkedListNode(__UpperCAmelCase , __UpperCAmelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__lowerCamelCase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__lowerCamelCase = value
self.list.add(__UpperCAmelCase )
@classmethod
def lowerCamelCase ( cls , __UpperCAmelCase = 128 ):
'''simple docstring'''
def cache_decorator_inner(__UpperCAmelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*__UpperCAmelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
__lowerCamelCase = LRUCache(__UpperCAmelCase )
__lowerCamelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__lowerCamelCase = func(*__UpperCAmelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , __UpperCAmelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__UpperCAmelCase , '''cache_info''' , __UpperCAmelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
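# Editor's note: a usage sketch for the cache above, assuming the original
# (un-mangled) names LRUCache and LRUCache.decorator for the class and its
# classmethod; the definitions above shadow both names. The decorator keys the
# cache on the first positional argument, so it fits single-argument functions
# such as this fibonacci.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)

print(fib(20))           # 6765, with intermediate results served from the cache
print(fib.cache_info())  # hits/misses/capacity/current size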
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """lxmert"""
lowerCAmelCase__ = {}
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=9500 , __UpperCAmelCase=1600 , __UpperCAmelCase=400 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=9 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=2048 , __UpperCAmelCase=4 , __UpperCAmelCase=6.67 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = num_qa_labels
__lowerCamelCase = num_object_labels
__lowerCamelCase = num_attr_labels
__lowerCamelCase = l_layers
__lowerCamelCase = x_layers
__lowerCamelCase = r_layers
__lowerCamelCase = visual_feat_dim
__lowerCamelCase = visual_pos_dim
__lowerCamelCase = visual_loss_normalizer
__lowerCamelCase = task_matched
__lowerCamelCase = task_mask_lm
__lowerCamelCase = task_obj_predict
__lowerCamelCase = task_qa
__lowerCamelCase = visual_obj_loss
__lowerCamelCase = visual_attr_loss
__lowerCamelCase = visual_feat_loss
__lowerCamelCase = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__UpperCAmelCase )
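# Editor's note: a small sketch of the layer bookkeeping the constructor above
# performs with its default arguments -- LXMERT splits its stack into language,
# cross-modality and vision sub-encoders.
l_layers, x_layers, r_layers = 9, 5, 5  # defaults from the signature above
num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
print(num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}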
| 622 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a_ = ["""small""", """medium""", """large"""]
a_ = """lm_head.decoder.weight"""
a_ = """lm_head.weight"""
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ):
__lowerCamelCase = torch.load(_UpperCamelCase )
__lowerCamelCase = d.pop(_UpperCamelCase )
os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase )
torch.save(_UpperCamelCase ,os.path.join(_UpperCamelCase ,_UpperCamelCase ) )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
a_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a_ = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
a_ = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
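# Editor's note: the converter's core operation is a single key rename in the
# checkpoint's state dict, shown here on a toy dict. The two key strings come
# straight from the constants above (which the mangling collapsed into `a_`).
import torch

state = {"lm_head.decoder.weight": torch.zeros(2, 2)}
state["lm_head.weight"] = state.pop("lm_head.decoder.weight")
print(sorted(state))  # ['lm_head.weight']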
| 622 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = np.full((len(_UpperCamelCase ), sequence_length, 2) ,_UpperCamelCase )
else:
__lowerCamelCase = np.full((len(_UpperCamelCase ), sequence_length) ,_UpperCamelCase )
for i, tensor in enumerate(_UpperCamelCase ):
if padding_side == "right":
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = tensor[:sequence_length]
else:
__lowerCamelCase = tensor[:sequence_length]
else:
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = tensor[:sequence_length]
else:
__lowerCamelCase = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( _UpperCamelCase : Dict ):
__lowerCamelCase = ord(_UpperCamelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
__lowerCamelCase = unicodedata.category(_UpperCamelCase )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = True
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = -1_0_0
lowerCAmelCase__ = "pt"
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
import torch
__lowerCamelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
__lowerCamelCase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__lowerCamelCase = self.tokenizer.pad(
__UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__lowerCamelCase = torch.tensor(batch['''entity_ids'''] ).shape[1]
__lowerCamelCase = self.tokenizer.padding_side
if padding_side == "right":
__lowerCamelCase = [
list(__UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) for label in labels
]
else:
__lowerCamelCase = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) + list(__UpperCAmelCase ) for label in labels
]
__lowerCamelCase = [feature['''ner_tags'''] for feature in features]
__lowerCamelCase = padding_tensor(__UpperCAmelCase , -1 , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = [feature['''original_entity_spans'''] for feature in features]
__lowerCamelCase = padding_tensor(__UpperCAmelCase , (-1, -1) , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = {k: torch.tensor(__UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
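# Editor's note: a clean, standalone re-implementation of padding_tensor's
# right-padding case, since the version above lost its array-assignment
# targets to the mangling. A tuple sentinel such as (-1, -1) pads a trailing
# dimension of 2 (used for the original_entity_spans feature).
import numpy as np

def pad_right(features, padding_value, sequence_length):
    out = np.full((len(features), sequence_length), padding_value)
    for i, row in enumerate(features):
        out[i, : len(row[:sequence_length])] = row[:sequence_length]
    return out.tolist()

print(pad_right([[1, 2, 3], [4]], padding_value=-100, sequence_length=4))
# [[1, 2, 3, -100], [4, -100, -100, -100]]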
| 622 | 1 |
def a__ ( _UpperCamelCase : int = 10**9 ):
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__lowerCamelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = 2
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowerCamelCase = n_head
# Used in the tests to check the size of the first hidden state
__lowerCamelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowerCamelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowerCamelCase = self.num_hidden_layers + 2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForPreTraining(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
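# Editor's note: a quick sketch of the layer arithmetic the testers above rely
# on (see the expected_num_hidden_layers bookkeeping in the model tester): a
# Funnel encoder stacks sum(block_sizes) layers, the full model appends the
# decoder layers, and two extra hidden states (input embeddings plus the
# upsampled encoder state) are reported on top of that.
block_sizes, num_decoder_layers = [1, 1, 2], 1              # the tester's defaults
num_hidden_layers = sum(block_sizes) + num_decoder_layers   # 5 for the full model
expected_hidden_states = num_hidden_layers + 2              # 7
print(num_hidden_layers, expected_hidden_states)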
| 622 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
a_ = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class DiffieHellman:
    def __init__( self , group = 14 ):
        '''simple docstring'''
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self ):
        '''simple docstring'''
        return hex(self.__private_key )[2:]
    def generate_public_key( self ):
        '''simple docstring'''
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key ):
        '''simple docstring'''
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str ):
        '''simple docstring'''
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key , prime ):
        '''simple docstring'''
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str , remote_public_key_str , group = 14 ):
        '''simple docstring'''
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
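# --- Illustrative sketch (added, not part of the original module): how the
# class above produces a shared secret. It assumes only the `primes` table
# defined earlier and the `hexlify`/`urandom` imports at the top of this file,
# so it stays independent of the class API.
def _demo_key_exchange(group: int = 14) -> bool:
    prime = primes[group]['''prime''']
    generator = primes[group]['''generator''']
    alice_private = int(hexlify(urandom(32 ) ) , base=16 )
    bob_private = int(hexlify(urandom(32 ) ) , base=16 )
    alice_public = pow(generator , alice_private , prime )
    bob_public = pow(generator , bob_private , prime )
    # both parties compute g^(a*b) mod p, so the derived secrets must agree
    return pow(bob_public , alice_private , prime ) == pow(alice_public , bob_private , prime )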
| 622 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats( url: str = "https://www.worldometers.info/coronavirus/" ):
    xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
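# --- Illustrative offline check (added): the XPath above selects the three big
# counter values. Against a hand-written fragment (hypothetical markup) it
# behaves the same way without a network call:
# _page = '<div class="maincounter-number"><span>42</span></div>' * 3
# html.fromstring(_page).xpath('//div[@class = "maincounter-number"]/span/text()')
# -> ['42', '42', '42']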
| 622 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__UpperCAmelCase , )
assert hasattr(self , '''env''' )
def lowerCamelCase ( self , __UpperCAmelCase=1 ):
'''simple docstring'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
    def lowerCamelCase ( self , job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def lowerCamelCase ( self ):
'''simple docstring'''
# create estimator
__lowerCamelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCAmelCase )
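# --- Illustrative sketch (added): the KPI extraction above filters the metrics
# dataframe by `metric_name` and collects the `value` column. A standalone
# pandas analogue (column names assumed to mirror the TrainingJobAnalytics
# dataframe):
# import pandas as pd
# df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.7, 0.5]})
# eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])  # [0.7]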
| 622 |
def split( string: str , separator: str = " " ):
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
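# --- Usage examples (added, illustrative):
# split("apple#banana#cherry#orange", separator="#")
# -> ['apple', 'banana', 'cherry', 'orange']
# split("Hello there")
# -> ['Hello', 'there']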
| 622 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a_ = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode : bool ,use_xla : bool ):
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args ,**kwargs ):
            return func(*args ,**kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args ,**kwargs ):
            return func(*args ,**kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def random_input_ids( batch_size : int ,sequence_length : int ,vocab_size : int ):
    rng = random.Random()
    values = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values ,shape=(batch_size, sequence_length) ,dtype=tf.int32 )
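# --- Illustrative sketch (added): the decorator factory above switches between
# plain eager execution and an XLA-compiled tf.function graph. A minimal
# standalone analogue of the graph path (names are made up for the demo):
# @tf.function(experimental_compile=True)
# def _xla_square(x):
#     return x * x
# _xla_square(tf.constant(3.0))  # traced and compiled on first call, cached after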
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = "TensorFlow"
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return tf.__version__
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# initialize GPU on separate process
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__lowerCamelCase = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_speed(_inference )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__lowerCamelCase = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_speed(_train )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase )
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__lowerCamelCase = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_memory(_inference )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase )
__lowerCamelCase = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__lowerCamelCase = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return self._measure_memory(_train )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
__lowerCamelCase = (
hasattr(__UpperCAmelCase , '''architectures''' )
and isinstance(config.architectures , __UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowerCamelCase = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
__lowerCamelCase = __import__('''transformers''' , fromlist=[model_class] )
__lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model_cls(__UpperCAmelCase )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
__lowerCamelCase = TF_MODEL_MAPPING[config.__class__](__UpperCAmelCase )
# encoder-decoder has vocab size saved differently
__lowerCamelCase = config.vocab_size if hasattr(__UpperCAmelCase , '''vocab_size''' ) else config.encoder.vocab_size
__lowerCamelCase = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , training=__UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCAmelCase , training=__UpperCAmelCase )
__lowerCamelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
__lowerCamelCase = (
hasattr(__UpperCAmelCase , '''architectures''' )
and isinstance(config.architectures , __UpperCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowerCamelCase = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
__lowerCamelCase = __import__('''transformers''' , fromlist=[model_class] )
__lowerCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = model_cls(__UpperCAmelCase )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
__lowerCamelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCAmelCase )
# encoder-decoder has vocab size saved differently
__lowerCamelCase = config.vocab_size if hasattr(__UpperCAmelCase , '''vocab_size''' ) else config.encoder.vocab_size
__lowerCamelCase = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__lowerCamelCase = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0]
__lowerCamelCase = tf.gradients(__UpperCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__lowerCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )[0]
__lowerCamelCase = tf.gradients(__UpperCAmelCase , model.trainable_variables )
return gradients
__lowerCamelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(__UpperCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__lowerCamelCase = timeit.repeat(
__UpperCAmelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
__lowerCamelCase = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
__lowerCamelCase = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
__lowerCamelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__lowerCamelCase = nvml.nvmlDeviceGetMemoryInfo(__UpperCAmelCase )
__lowerCamelCase = meminfo.used
__lowerCamelCase = Memory(__UpperCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
__lowerCamelCase = None
else:
__lowerCamelCase = measure_peak_memory_cpu(__UpperCAmelCase )
__lowerCamelCase = Memory(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
__lowerCamelCase = stop_memory_tracing(__UpperCAmelCase )
if memory is None:
__lowerCamelCase = summary.total
else:
__lowerCamelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
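# --- Illustrative note (added): the speed measurement above uses the standard
# timeit idiom -- take the minimum over repeats, then divide by the per-repeat
# call count for a stable per-call estimate:
# import timeit
# runs = timeit.repeat(lambda: sum(range(1_000)), repeat=3, number=10)
# per_call = min(runs) / 10.0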
| 622 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
            len(__UpperCAmelCase ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase ) # check for doc token related keys in dictionary.
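# --- Illustrative sketch (added): the dummy index in these tests boils down to
# attaching a flat inner-product FAISS index to an "embeddings" column and
# querying it. A minimal standalone version (assumes `faiss` is available and
# relies on the `datasets` search API):
def _demo_faiss_retrieval(retrieval_vector_size=8):
    ds = Dataset.from_dict(
        {
            "text": ["foo", "bar"],
            "embeddings": [np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)],
        }
    )
    ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
    query = np.ones(retrieval_vector_size, dtype=np.float32)
    scores, examples = ds.get_nearest_examples("embeddings", query, k=1)
    return examples["text"]  # ["bar"] -- the larger embedding maximizes the inner product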
| 622 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name ):
    config = SwinConfig(image_size=1_92 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' ,'''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' ,'''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' ,'''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' ,'''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' ,'''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' ,'''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' ,'''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Optional[int] ):
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(_UpperCamelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
__lowerCamelCase = key.split('''.''' )
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = int(key_split[4] )
__lowerCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[
dim : dim * 2, :
]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[
:dim
]
__lowerCamelCase = val[
dim : dim * 2
]
__lowerCamelCase = val[
-dim:
]
else:
__lowerCamelCase = val
return orig_state_dict
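# --- Illustrative sketch (added): the "qkv" branch above slices a fused
# attention weight of shape (3*dim, dim) into query/key/value chunks along the
# first axis. A standalone numpy equivalent:
# import numpy as np
# dim = 4
# qkv = np.arange(3 * dim * dim).reshape(3 * dim, dim)
# q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
# assert q.shape == k.shape == v.shape == (dim, dim)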
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : int ,_UpperCamelCase : Optional[int] ):
__lowerCamelCase = torch.load(_UpperCamelCase ,map_location='''cpu''' )['''model''']
__lowerCamelCase = get_swin_config(_UpperCamelCase )
__lowerCamelCase = SwinForMaskedImageModeling(_UpperCamelCase )
model.eval()
__lowerCamelCase = convert_state_dict(_UpperCamelCase ,_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
__lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCamelCase = ViTImageProcessor(size={'''height''': 1_92, '''width''': 1_92} )
__lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw )
__lowerCamelCase = image_processor(images=_UpperCamelCase ,return_tensors='''pt''' )
with torch.no_grad():
__lowerCamelCase = model(**_UpperCamelCase ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
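# --- Usage note (added): an illustrative invocation built from the argparse
# flags above (the script filename is hypothetical):
#   python convert_swin_simmim.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path /path/to/simmim_pretrain.pth \
#       --pytorch_dump_folder_path ./swin-simmim-hf \
#       --push_to_hub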
| 622 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 2E-3
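# --- Illustrative sketch (added): instantiating the config with no arguments
# yields the PoolFormer-S12 defaults declared above, and the ONNX config marks
# the batch/channel/spatial axes of `pixel_values` as dynamic. Assuming the
# mangled class above corresponds to `PoolFormerConfig`:
# cfg = PoolFormerConfig()
# cfg.hidden_sizes  # [64, 128, 320, 512]
# cfg.depths        # [2, 2, 6, 2]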
| 622 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=1000 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = coordinate_size
__lowerCamelCase = shape_size
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCamelCase = text_seq_length
__lowerCamelCase = (image_size // patch_size) ** 2 + 1
__lowerCamelCase = self.text_seq_length + self.image_seq_length
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__lowerCamelCase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCamelCase = bbox[i, j, 3]
__lowerCamelCase = bbox[i, j, 1]
__lowerCamelCase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCamelCase = bbox[i, j, 2]
__lowerCamelCase = bbox[i, j, 0]
__lowerCamelCase = tmp_coordinate
__lowerCamelCase = tf.constant(__UpperCAmelCase )
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowerCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFLayoutLMvaModel(config=__UpperCAmelCase )
# text + image
__lowerCamelCase = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase )
__lowerCamelCase = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , training=__UpperCAmelCase , )
__lowerCamelCase = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowerCamelCase = model(__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowerCamelCase = model({'''pixel_values''': pixel_values} , training=__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFLayoutLMvaForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFLayoutLMvaForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = 2
__lowerCamelCase = TFLayoutLMvaForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , training=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
((__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase) ,(__lowerCamelCase)) = config_and_inputs
__lowerCamelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return True
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
if model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = {
k: tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__lowerCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFLayoutLMvaModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
if getattr(__UpperCAmelCase , '''hf_compute_loss''' , __UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
__lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__lowerCamelCase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCAmelCase )[0]
]
__lowerCamelCase = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__lowerCamelCase = prepared_for_class.pop('''input_ids''' )
__lowerCamelCase = model(__UpperCAmelCase , **__UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__lowerCamelCase = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
__lowerCamelCase = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__lowerCamelCase = -100
__lowerCamelCase = tf.convert_to_tensor(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , **__UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__lowerCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
__lowerCamelCase = prepared_for_class.keys() - inputs_dict.keys()
__lowerCamelCase = inspect.signature(model.call ).parameters
__lowerCamelCase = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__lowerCamelCase = {0: '''input_ids'''}
for label_key in label_keys:
__lowerCamelCase = signature_names.index(__UpperCAmelCase )
__lowerCamelCase = label_key
__lowerCamelCase = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__lowerCamelCase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__lowerCamelCase = prepared_for_class[value]
__lowerCamelCase = tuple(__UpperCAmelCase )
# Send to model
__lowerCamelCase = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowerCamelCase ( self ):
'''simple docstring'''
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase = type
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
        (
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
        ) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
        (
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
        ) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFLayoutLMvaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def a__ ( ):
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' ).pixel_values
__lowerCamelCase = tf.constant([[1, 2]] )
__lowerCamelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__lowerCamelCase = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
__lowerCamelCase = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """visual_bert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
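# A minimal usage sketch (assuming the class above is the upstream
# VisualBertConfig despite the obfuscated name):
#
#   config = VisualBertConfig(visual_embedding_dim=512)
#   config.hidden_size  # -> 768, the default from the signature above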
| 622 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
a_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
__lowerCamelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
_UpperCamelCase ,output_loading_info=_UpperCamelCase )
else:
__lowerCamelCase = ProphetNetForConditionalGenerationOld.from_pretrained(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = ProphetNetForConditionalGeneration.from_pretrained(
_UpperCamelCase ,output_loading_info=_UpperCamelCase )
__lowerCamelCase = ['''key_proj''', '''value_proj''', '''query_proj''']
__lowerCamelCase = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
__lowerCamelCase = key.split('''.''' )
if attributes[0] == "lm_head":
__lowerCamelCase = prophet
__lowerCamelCase = prophet_old
else:
__lowerCamelCase = prophet.prophetnet
__lowerCamelCase = prophet_old.model
__lowerCamelCase = False
for attribute in attributes:
if attribute in mapping:
__lowerCamelCase = mapping[attribute]
if not hasattr(_UpperCamelCase ,_UpperCamelCase ) and len(_UpperCamelCase ) > 0:
__lowerCamelCase = attribute
elif hasattr(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCamelCase = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
__lowerCamelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCamelCase = old_model.bias
logger.info(F"""{attribute} is initialized""" )
__lowerCamelCase = True
break
elif attribute in special_keys and hasattr(_UpperCamelCase ,'''in_proj_weight''' ):
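                # Fairseq-style attention stores the query/key/value projections
                # stacked in a single in_proj_weight of shape
                # (3 * embed_dim, embed_dim); the slices below split it back out.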
__lowerCamelCase = old_model.in_proj_weight.shape[0] // 3
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
__lowerCamelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
__lowerCamelCase = True
break
if attribute.isdigit():
__lowerCamelCase = model[int(_UpperCamelCase )]
__lowerCamelCase = old_model[int(_UpperCamelCase )]
else:
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
if old_attribute == "":
__lowerCamelCase = old_model
else:
if not hasattr(_UpperCamelCase ,_UpperCamelCase ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
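# Example invocation (the script name and paths are placeholders, not taken
# from the original file):
#
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted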
| 622 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """spiece.model"""}
a_ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
a_ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
a_ = """▁"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it and is kept in the raw text, so there should be a match in a
        # non-normalized sentence.
__lowerCamelCase = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
__lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
__lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
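        # e.g. with keep_accents=False and do_lower_case=True, "Héllo  World"
        # normalizes to "hello world": NFKD decomposes "é" into "e" plus a
        # combining accent, which the filter above then drops.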
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.preprocess_text(__UpperCAmelCase )
__lowerCamelCase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
__lowerCamelCase = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__lowerCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
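        # Resulting formats (the standard ALBERT convention): a single sequence
        # becomes ``[CLS] X [SEP]`` and a pair becomes ``[CLS] A [SEP] B [SEP]``.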
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 622 | 1 |
from math import sqrt
def a__ ( _UpperCamelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(sqrt(_UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( _UpperCamelCase : int = 1_00_01 ):
__lowerCamelCase = 0
__lowerCamelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(_UpperCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(_UpperCamelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ = """true"""
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[str]=82 ,_UpperCamelCase : Optional[Any]=16 ):
set_seed(42 )
__lowerCamelCase = RegressionModel()
__lowerCamelCase = deepcopy(_UpperCamelCase )
__lowerCamelCase = RegressionDataset(length=_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase ,batch_size=_UpperCamelCase )
model.to(accelerator.device )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return model, ddp_model, dataloader
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : str=False ):
__lowerCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' ,split='''validation''' )
def tokenize_function(_UpperCamelCase : int ):
__lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase )
return outputs
with accelerator.main_process_first():
__lowerCamelCase = dataset.map(
_UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
__lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(_UpperCamelCase : Any ):
if use_longest:
return tokenizer.pad(_UpperCamelCase ,padding='''longest''' ,return_tensors='''pt''' )
return tokenizer.pad(_UpperCamelCase ,padding='''max_length''' ,max_length=1_28 ,return_tensors='''pt''' )
return DataLoader(_UpperCamelCase ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=16 )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : List[str] ):
__lowerCamelCase = Accelerator(dispatch_batches=_UpperCamelCase ,split_batches=_UpperCamelCase )
__lowerCamelCase = get_dataloader(_UpperCamelCase ,not dispatch_batches )
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' ,return_dict=_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(_UpperCamelCase ,_UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = []
for batch in dataloader:
__lowerCamelCase ,__lowerCamelCase = batch.values()
with torch.no_grad():
__lowerCamelCase = model(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((logit, target) )
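        # gather_for_metrics concatenates the per-process tensors and drops the
        # samples that were duplicated to pad the final batch, so the gathered
        # logits/targets line up with the true dataset length.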
logits_and_targets.append((logit, target) )
__lowerCamelCase ,__lowerCamelCase = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCamelCase )
targs.append(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = torch.cat(_UpperCamelCase ), torch.cat(_UpperCamelCase )
return logits, targs
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : List[Any]=82 ,_UpperCamelCase : str=False ,_UpperCamelCase : List[str]=False ,_UpperCamelCase : Optional[int]=16 ):
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = get_basic_setup(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = generate_predictions(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
assert (
len(_UpperCamelCase ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCamelCase )}"""
def a__ ( _UpperCamelCase : bool = False ,_UpperCamelCase : bool = False ):
__lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' )
__lowerCamelCase ,__lowerCamelCase = get_mrpc_setup(_UpperCamelCase ,_UpperCamelCase )
# First do baseline
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''no''']
model.to(_UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCamelCase )
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCamelCase ,references=batch['''labels'''] )
__lowerCamelCase = metric.compute()
# Then do distributed
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase = batch['''labels''']
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCamelCase ,references=_UpperCamelCase )
__lowerCamelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def a__ ( ):
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_UpperCamelCase ,_UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCamelCase = Accelerator(split_batches=_UpperCamelCase ,dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__lowerCamelCase = Accelerator()
test_torch_metrics(_UpperCamelCase ,5_12 )
accelerator.state._reset_state()
def a__ ( _UpperCamelCase : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 622 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = ReformerTokenizer
lowerCAmelCase__ = ReformerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = ReformerTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''<s>'''
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__UpperCAmelCase ) , 1000 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
__lowerCamelCase = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(__UpperCAmelCase )
__lowerCamelCase = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# Simple input
__lowerCamelCase = '''This is a simple input'''
__lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowerCamelCase = ('''This is a simple input''', '''This is a pair''')
__lowerCamelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__UpperCAmelCase , tokenizer_r.encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__UpperCAmelCase , tokenizer_r.batch_encode_plus , __UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ReformerTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__lowerCamelCase = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
__lowerCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowerCamelCase = ''' '''.join(__UpperCAmelCase )
__lowerCamelCase = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' )
__lowerCamelCase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
__lowerCamelCase = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
__lowerCamelCase = encoded_sequence['''input_ids'''].shape
__lowerCamelCase = ReformerModel(__UpperCAmelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
__lowerCamelCase = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=__UpperCAmelCase , sequences=__UpperCAmelCase , )
| 622 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCamelCase = CLIPTextModel(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = CLIPTextModelWithProjection(__UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
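    # The components above mirror SDXL at toy scale: two text encoder/tokenizer
    # pairs feed the UNet's "text_time" additional embeddings, so the pipeline
    # can be built as StableDiffusionXLImgaImgPipeline(**components).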
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
__lowerCamelCase = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = sd_pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# forward without prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = negative_prompt
__lowerCamelCase = 3 * [inputs['''prompt''']]
__lowerCamelCase = sd_pipe(**__UpperCAmelCase )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = 3 * ['''this is a negative prompt''']
__lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
        (
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
        ) = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
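        # encode_prompt returns (prompt_embeds, negative_prompt_embeds,
        # pooled_prompt_embeds, negative_pooled_prompt_embeds); the pooled
        # embeddings feed SDXL's added text/time conditioning in the call below.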
__lowerCamelCase = sd_pipe(
**__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
__lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
__lowerCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__lowerCamelCase = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_inputs(__UpperCAmelCase )
__lowerCamelCase = pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 622 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a_ = logging.getLogger()
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
__lowerCamelCase = parser.parse_args()
return args.f
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''' )
with patch.object(__UpperCAmelCase , '''argv''' , __UpperCAmelCase ):
__lowerCamelCase = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(__UpperCAmelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(__UpperCAmelCase )
__lowerCamelCase = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__UpperCAmelCase )
__lowerCamelCase = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(__UpperCAmelCase )
| 622 |
import torch
from diffusers import StableDiffusionPipeline
a_ = """path-to-your-trained-model"""
a_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
a_ = """A photo of sks dog in a bucket"""
a_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 622 | 1 |
import torch
from diffusers import StableDiffusionPipeline
a_ = """path-to-your-trained-model"""
a_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
a_ = """A photo of sks dog in a bucket"""
a_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 622 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __lowerCAmelCase :
@staticmethod
def lowerCamelCase ( *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
pass
def a__ ( _UpperCamelCase : List[str] ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a_ = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
__lowerCamelCase = '''What is the placebo?'''
__lowerCamelCase = [
{
'''image''': load_image(__UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = dqa_pipeline(__UpperCAmelCase , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
{'''score''': ANY(__UpperCAmelCase ), '''answer''': ANY(__UpperCAmelCase ), '''start''': ANY(__UpperCAmelCase ), '''end''': ANY(__UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''How many cats are there?'''
__lowerCamelCase = [
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , __UpperCAmelCase )
        # The OCR detects no text in this image, so layoutlmv2 should fail.
        # It probably returns an empty answer.
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
__lowerCamelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , words=__UpperCAmelCase , boxes=__UpperCAmelCase , top_k=2 )
self.assertEqual(__UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCAmelCase )
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowerCamelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowerCamelCase = list(zip(*apply_tesseract(load_image(__UpperCAmelCase ) , __UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
__lowerCamelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowerCamelCase = INVOICE_URL
__lowerCamelCase = '''What is the invoice number?'''
__lowerCamelCase = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
| 622 | 1 |
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def a__ ( _UpperCamelCase : Matrix ,_UpperCamelCase : Matrix ):
__lowerCamelCase = len(_UpperCamelCase )
__lowerCamelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCamelCase )]
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
for row in range(_UpperCamelCase ):
for col in range(_UpperCamelCase ):
__lowerCamelCase = matrix[row][col]
__lowerCamelCase = vector[row][0]
__lowerCamelCase = 0
__lowerCamelCase = 0
while row < size and col < size:
# pivoting
        __lowerCamelCase = max(
            (abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCamelCase ,_UpperCamelCase )
        )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__lowerCamelCase ,__lowerCamelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 ,_UpperCamelCase ):
__lowerCamelCase = augmented[rowa][col] / augmented[row][col]
__lowerCamelCase = 0
for cola in range(col + 1 ,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 ,_UpperCamelCase ):
for row in range(_UpperCamelCase ):
__lowerCamelCase = augmented[row][col] / augmented[col][col]
for cola in range(_UpperCamelCase ,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] ,10 )] for row in range(_UpperCamelCase )
]
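# Hedged sanity check, using the original (pre-obfuscation) name ``solve`` that the
# call site below still references: solve([[2, 0], [0, 4]], [[2], [4]]) performs
# Gaussian elimination with partial pivoting and returns [[1.0], [1.0]].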
def a__ ( _UpperCamelCase : list[int] ):
__lowerCamelCase = len(_UpperCamelCase )
__lowerCamelCase = [[0 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )]
__lowerCamelCase = [[0] for _ in range(_UpperCamelCase )]
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
for x_val, y_val in enumerate(_UpperCamelCase ):
for col in range(_UpperCamelCase ):
__lowerCamelCase = (x_val + 1) ** (size - col - 1)
__lowerCamelCase = y_val
__lowerCamelCase = solve(_UpperCamelCase ,_UpperCamelCase )
def interpolated_func(_UpperCamelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_UpperCamelCase ) )
return interpolated_func
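# Hedged sanity check, mirroring the Project Euler 101 worked example: for the cubes
# u(n) = n**3 the first three terms are [1, 8, 27], the fitted quadratic is
# 6n**2 - 11n + 6, and interpolate([1, 8, 27])(4) evaluates to 58 -- the first
# incorrect term (FIT) of the optimum polynomial OP(3, n).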
def a__ ( _UpperCamelCase : int ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
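# Hedged sanity check, matching the Project Euler 101 statement for this generating
# function: question_function(1) == 1 and question_function(2) == 683.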
def a__ ( _UpperCamelCase : Callable[[int], int] = question_function ,_UpperCamelCase : int = 10 ):
__lowerCamelCase = [func(_UpperCamelCase ) for x_val in range(1 ,order + 1 )]
__lowerCamelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 ,order + 1 )
]
__lowerCamelCase = 0
__lowerCamelCase = 42
__lowerCamelCase = 42
for poly in polynomials:
__lowerCamelCase = 1
while func(_UpperCamelCase ) == poly(_UpperCamelCase ):
x_val += 1
ret += poly(_UpperCamelCase )
return ret
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''[PAD]'''
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__UpperCAmelCase ) , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
__lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''Hello World!'''
__lowerCamelCase = [35389, 6672, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 622 | 1 |
def a__ ( _UpperCamelCase : int = 50 ):
__lowerCamelCase = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
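# Hedged sanity check against the Project Euler 114 worked example: a row of
# length 7 admits exactly 17 block arrangements, so solution(7) == 17.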
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def a__ ( _UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = None
# source code of `config_class`
__lowerCamelCase = inspect.getsource(_UpperCamelCase )
__lowerCamelCase = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowerCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase = ckpt_name
break
return checkpoint
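# A minimal, self-contained sketch (hedged: standalone illustration with a local
# pattern object, independent of the mangled names above) of what the checkpoint
# regex extracts from a config docstring:
_demo_checkpoint_re = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
assert _demo_checkpoint_re.findall(
    "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]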
def a__ ( ):
__lowerCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowerCamelCase = get_checkpoint_from_config_class(_UpperCamelCase )
__lowerCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 622 | 1 |
def a__ ( _UpperCamelCase : int = 50 ):
__lowerCamelCase = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
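# Hedged sanity check against the Project Euler 116 worked example: a row of
# length 5 allows 7 red (length-2), 3 green (length-3) and 2 blue (length-4)
# tilings, so solution(5) == 12.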
if __name__ == "__main__":
print(f"{solution() = }")
| 622 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
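# Effect of the _LazyModule registration above (hedged sketch): an import such as
# ``from transformers.models.clipseg import CLIPSegModel`` resolves the attribute
# lazily, so the heavy torch-backed modeling module is only loaded on first access.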
| 622 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , lowerCAmelCase__ , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = RobertaConfig
lowerCAmelCase__ = """roberta"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
__lowerCamelCase = RobertaEmbeddings(__UpperCAmelCase )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , lowerCAmelCase__ , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = RobertaConfig
lowerCAmelCase__ = """roberta"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
__lowerCamelCase = config.num_labels
__lowerCamelCase = config.num_hidden_layers
__lowerCamelCase = DeeRobertaModel(__UpperCAmelCase )
__lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=-1 , __UpperCAmelCase=False , ):
'''simple docstring'''
__lowerCamelCase = self.num_layers
try:
__lowerCamelCase = self.roberta(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , head_mask=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase , )
__lowerCamelCase = outputs[1]
__lowerCamelCase = self.dropout(__UpperCAmelCase )
__lowerCamelCase = self.classifier(__UpperCAmelCase )
__lowerCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
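            # DeeBERT-style early exit: an intermediate ("highway") classifier was
            # confident enough, so its logits and exit layer arrive via the exception
            # instead of running the remaining transformer layers.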
__lowerCamelCase = e.message
__lowerCamelCase = e.exit_layer
__lowerCamelCase = outputs[0]
if not self.training:
__lowerCamelCase = entropy(__UpperCAmelCase )
__lowerCamelCase = []
__lowerCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase = []
for highway_exit in outputs[-1]:
__lowerCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(__UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__UpperCAmelCase )
if train_highway:
__lowerCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase = (loss,) + outputs
if not self.training:
__lowerCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 622 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = RoFormerTokenizer
lowerCAmelCase__ = RoFormerTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
__lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase ,__lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
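    # The empty overrides below intentionally skip common tokenizer tests that do
    # not apply to the rjieba-backed RoFormer tokenizer (hedged: inferred from the
    # upstream test suite, which disables them the same way).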
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
| 622 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """cvt"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 192, 384] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__lowerCamelCase = num_channels
__lowerCamelCase = patch_sizes
__lowerCamelCase = patch_stride
__lowerCamelCase = patch_padding
__lowerCamelCase = embed_dim
__lowerCamelCase = num_heads
__lowerCamelCase = depth
__lowerCamelCase = mlp_ratio
__lowerCamelCase = attention_drop_rate
__lowerCamelCase = drop_rate
__lowerCamelCase = drop_path_rate
__lowerCamelCase = qkv_bias
__lowerCamelCase = cls_token
__lowerCamelCase = qkv_projection_method
__lowerCamelCase = kernel_qkv
__lowerCamelCase = padding_kv
__lowerCamelCase = stride_kv
__lowerCamelCase = padding_q
__lowerCamelCase = stride_q
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
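# Minimal usage sketch (hedged: illustrative values only, using the original class
# name CvtConfig): each list configures the three CvT stages, so e.g.
# ``CvtConfig(num_heads=[1, 3, 6])`` keeps one attention head in stage 1, three in
# stage 2 and six in stage 3.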
| 622 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 622 | 1 |
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : list[list[int]] ):
def update_area_of_max_square(_UpperCamelCase : int ,_UpperCamelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCamelCase = update_area_of_max_square(_UpperCamelCase ,col + 1 )
__lowerCamelCase = update_area_of_max_square(row + 1 ,col + 1 )
__lowerCamelCase = update_area_of_max_square(row + 1 ,_UpperCamelCase )
if mat[row][col]:
__lowerCamelCase = 1 + min([right, diagonal, down] )
__lowerCamelCase = max(largest_square_area[0] ,_UpperCamelCase )
return sub_problem_sol
else:
return 0
__lowerCamelCase = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
_UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCamelCase = update_area_of_max_square_using_dp_array(_UpperCamelCase ,col + 1 ,_UpperCamelCase )
__lowerCamelCase = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,_UpperCamelCase )
__lowerCamelCase = update_area_of_max_square_using_dp_array(row + 1 ,_UpperCamelCase ,_UpperCamelCase )
if mat[row][col]:
__lowerCamelCase = 1 + min([right, diagonal, down] )
__lowerCamelCase = max(largest_square_area[0] ,_UpperCamelCase )
__lowerCamelCase = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCamelCase = [0]
__lowerCamelCase = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 ,0 ,_UpperCamelCase )
return largest_square_area[0]
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : list[list[int]] ):
__lowerCamelCase = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCamelCase = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
__lowerCamelCase = dp_array[row][col + 1]
__lowerCamelCase = dp_array[row + 1][col + 1]
__lowerCamelCase = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCamelCase = 1 + min(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = max(dp_array[row][col] ,_UpperCamelCase )
else:
__lowerCamelCase = 0
return largest_square_area
def a__ ( _UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : list[list[int]] ):
__lowerCamelCase = [0] * (cols + 1)
__lowerCamelCase = [0] * (cols + 1)
__lowerCamelCase = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
__lowerCamelCase = current_row[col + 1]
__lowerCamelCase = next_row[col + 1]
__lowerCamelCase = next_row[col]
if mat[row][col] == 1:
__lowerCamelCase = 1 + min(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = max(current_row[col] ,_UpperCamelCase )
else:
__lowerCamelCase = 0
        __lowerCamelCase = current_row[:]  # copy, not alias: reusing the same list would let this pass's writes corrupt the next row's diagonal reads
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
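    # Hedged cross-check (original function names): all four variants agree on this
    # input and report 2 -- the largest square's *side length*, despite "area" in
    # the names.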
| 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ = getLogger(__name__)
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : int = 8 ,_UpperCamelCase : str = DEFAULT_DEVICE ,_UpperCamelCase : Dict=False ,_UpperCamelCase : Dict="summarization" ,_UpperCamelCase : Optional[int]=None ,**_UpperCamelCase : Dict ,):
__lowerCamelCase = Path(_UpperCamelCase ).open('''w''' ,encoding='''utf-8''' )
__lowerCamelCase = str(_UpperCamelCase )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
__lowerCamelCase = model.half()
__lowerCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase ,_UpperCamelCase )
if prefix is None:
__lowerCamelCase = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase ,_UpperCamelCase ) ) ):
__lowerCamelCase = [prefix + text for text in examples_chunk]
__lowerCamelCase = tokenizer(_UpperCamelCase ,return_tensors='''pt''' ,truncation=_UpperCamelCase ,padding='''longest''' ).to(_UpperCamelCase )
__lowerCamelCase = model.generate(
input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**_UpperCamelCase ,)
__lowerCamelCase = tokenizer.batch_decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowerCamelCase = int(time.time() - start_time ) # seconds
__lowerCamelCase = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def a__ ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a__ ( _UpperCamelCase : Union[str, Any]=True ):
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' ,type=_UpperCamelCase ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' ,type=_UpperCamelCase ,help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' ,type=_UpperCamelCase ,help='''where to save summaries''' )
parser.add_argument('''--reference_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default='''metrics.json''' ,help='''where to save metrics''' )
parser.add_argument('''--device''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' ,type=_UpperCamelCase ,required=_UpperCamelCase ,default=_UpperCamelCase ,help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' ,type=_UpperCamelCase ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' ,type=_UpperCamelCase ,default=8 ,required=_UpperCamelCase ,help='''batch size''' )
parser.add_argument(
'''--n_obs''' ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' )
parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' ,nargs='''?''' ,type=_UpperCamelCase ,const=datetime_now() ,help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCamelCase ,__lowerCamelCase = parser.parse_known_args()
__lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowerCamelCase = generate_summaries_or_translations(
_UpperCamelCase ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fpaa=args.fpaa ,task=args.task ,prefix=args.prefix ,**_UpperCamelCase ,)
if args.reference_path is None:
return {}
# Compute scores
__lowerCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
__lowerCamelCase = score_fn(_UpperCamelCase ,_UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
__lowerCamelCase = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase ,open(args.score_path ,'''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
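    # Usage for summarization (hedged example, mirroring the MT invocation above):
    # python run_eval.py t5-base cnn_dm/test.source $save_dir/test_summaries.txt --reference_path cnn_dm/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 32 $@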
run_generate(verbose=True)
| 622 | 1 |
def a__ ( _UpperCamelCase : int ):
__lowerCamelCase = abs(_UpperCamelCase )
__lowerCamelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def a__ ( _UpperCamelCase : int ):
__lowerCamelCase = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def a__ ( _UpperCamelCase : int ):
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def a__ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable ,_UpperCamelCase : int ) -> None:
__lowerCamelCase = F"""{func.__name__}({value})"""
__lowerCamelCase = timeit(F"""__main__.{call}""" ,setup='''import __main__''' )
print(F"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase ,_UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 622 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : List[str] ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : Any=None ):
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(_UpperCamelCase ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = embed_dim
__lowerCamelCase = word_embed_proj_dim
__lowerCamelCase = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCAmelCase , **self.config_updates , )
__lowerCamelCase = prepare_opt_inputs_dict(__UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel(config=__UpperCAmelCase )
__lowerCamelCase = inputs_dict['''input_ids''']
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict['''attention_mask'''][:1, :]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase = outputs.to_tuple()
        # create hypothetical next tokens and an accompanying attention-mask extension
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append them to the existing input_ids and attention_mask
__lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
                # Build the word embedding weights if they do not exist yet,
                # then retry fetching the attribute once it has been built.
model.build()
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowerCamelCase = model_class(config=__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
__lowerCamelCase = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
__lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
__lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCamelCase = False
self.assertTrue(__UpperCAmelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
return tf.constant(_UpperCamelCase ,dtype=tf.intaa )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = 9_9
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCamelCase = input_ids.shape[0]
__lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowerCamelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__lowerCamelCase = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCamelCase = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
__lowerCamelCase = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-3 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = '''facebook/opt-350m'''
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCamelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
__lowerCamelCase = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
__lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-125m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = '''left'''
# use different length sentences to test batching
__lowerCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
__lowerCamelCase = inputs['''input_ids''']
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] )
__lowerCamelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase )
__lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowerCamelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''facebook/opt-350m'''
__lowerCamelCase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowerCamelCase = []
__lowerCamelCase = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
__lowerCamelCase = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
__lowerCamelCase = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
__lowerCamelCase = model.generate(__UpperCAmelCase , max_length=10 )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 622 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Any ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = np.full((len(_UpperCamelCase ), sequence_length, 2) ,_UpperCamelCase )
else:
__lowerCamelCase = np.full((len(_UpperCamelCase ), sequence_length) ,_UpperCamelCase )
for i, tensor in enumerate(_UpperCamelCase ):
if padding_side == "right":
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = tensor[:sequence_length]
else:
__lowerCamelCase = tensor[:sequence_length]
else:
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = tensor[:sequence_length]
else:
__lowerCamelCase = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( _UpperCamelCase : Dict ):
__lowerCamelCase = ord(_UpperCamelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
__lowerCamelCase = unicodedata.category(_UpperCamelCase )
if cat.startswith('''P''' ):
return True
return False
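# Hedged examples for the punctuation check above (original helper presumably named
# ``is_punctuation``): "!" and "、" count as punctuation (ASCII punctuation ranges /
# Unicode category "P..."), while "a" does not.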
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = True
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = -1_0_0
lowerCAmelCase__ = "pt"
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
import torch
__lowerCamelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
__lowerCamelCase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__lowerCamelCase = self.tokenizer.pad(
__UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
__lowerCamelCase = torch.tensor(batch['''entity_ids'''] ).shape[1]
__lowerCamelCase = self.tokenizer.padding_side
if padding_side == "right":
__lowerCamelCase = [
list(__UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) for label in labels
]
else:
__lowerCamelCase = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) + list(__UpperCAmelCase ) for label in labels
]
__lowerCamelCase = [feature['''ner_tags'''] for feature in features]
__lowerCamelCase = padding_tensor(__UpperCAmelCase , -1 , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = [feature['''original_entity_spans'''] for feature in features]
__lowerCamelCase = padding_tensor(__UpperCAmelCase , (-1, -1) , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = {k: torch.tensor(__UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 622 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ = logging.getLogger(__name__)
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ):
__lowerCamelCase = np.argmax(_UpperCamelCase ,axis=1 )
return np.sum(outputs == labels )
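# Hedged sanity check (the original helper was presumably named ``accuracy``): with
# outputs [[0.1, 0.9], [0.8, 0.2]] and labels [1, 0], argmax over axis 1 yields
# [1, 0], so the function returns 2 correct predictions.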
def a__ ( _UpperCamelCase : Optional[int] ):
with open(_UpperCamelCase ,encoding='''utf_8''' ) as f:
__lowerCamelCase = csv.reader(_UpperCamelCase )
__lowerCamelCase = []
next(_UpperCamelCase ) # skip the first line
for line in tqdm(_UpperCamelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Dict ):
__lowerCamelCase = []
for dataset in encoded_datasets:
__lowerCamelCase = len(_UpperCamelCase )
__lowerCamelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa )
__lowerCamelCase = np.zeros((n_batch, 2) ,dtype=np.intaa )
__lowerCamelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.intaa )
__lowerCamelCase = np.zeros((n_batch,) ,dtype=np.intaa )
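        # For every story we build two candidate sequences of the form
        # [start] story [delimiter] continuation [classify]; the double-heads model
        # scores both and mc_labels marks the correct continuation (hedged note: the
        # collapsed names below originally distinguished the two continuations).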
        for i, (story, conta, conta, mc_label) in enumerate(_UpperCamelCase ):
__lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCamelCase = with_conta
__lowerCamelCase = with_conta
__lowerCamelCase = len(_UpperCamelCase ) - 1
__lowerCamelCase = len(_UpperCamelCase ) - 1
__lowerCamelCase = with_conta
__lowerCamelCase = with_conta
__lowerCamelCase = mc_label
__lowerCamelCase = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) )
return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
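# Example invocation (a sketch; the ROCStories CSV paths are placeholders you
# must supply yourself):
# python run_openai_gpt.py \
#   --model_name openai-gpt \
#   --do_train --do_eval \
#   --train_dataset "$ROC_STORIES_DIR/cloze_test_val.csv" \
#   --eval_dataset "$ROC_STORIES_DIR/cloze_test_test.csv" \
#   --output_dir ../log \
#   --train_batch_size 16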
| 622 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
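# Shape sketch for the collator above (illustrative values): with batch_size=2
# and num_choices=4, the 2x4 nested features are flattened to 8 rows, padded
# together in a single tokenizer.pad call, then reshaped back:
#   pad -> (8, seq_len), view -> (2, 4, seq_len)
# so every choice in the batch shares one padded length.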
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""")
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
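    # Worked example (illustrative): for one SWAG row, first_sentences holds
    # sent1 repeated 4 times and second_sentences holds "sent2 ending{0..3}";
    # after tokenization, the flat list of 4 encodings is regrouped so that
    # tokenized_examples["input_ids"][row] has shape (4, seq_len).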
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
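# Example invocation (a sketch; flags mirror the argument dataclasses above
# plus standard TrainingArguments):
# python run_swag.py \
#   --model_name_or_path bert-base-uncased \
#   --do_train --do_eval \
#   --output_dir /tmp/swag_out \
#   --per_device_train_batch_size 16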
| 622 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
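# Sketch of the buffering heuristic above: with the defaults, one buffer holds
# roughly seq_length * chars_per_token * num_of_sequences characters,
# e.g. 1024 * 3.6 * 1024 ~ 3.8M characters, i.e. about enough raw text for
# ~1024 packed sequences of 1024 tokens at ~3.6 characters per token.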
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
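# Relationship used above (a sketch): perplexity = exp(mean cross-entropy
# loss), e.g. a mean loss of 2.0 gives exp(2.0) ~ 7.39. The per-step loss is
# repeated batch_size times before gathering so that accelerator.gather
# weights every example equally across processes.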
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
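# Example invocation (a sketch; field names follow the EvaluationArguments
# dataclass imported above, which lives in the accompanying arguments.py):
# accelerate launch validation_loss.py --model_ckpt <model> --dataset_name <dataset>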
| 622 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 622 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
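# Minimal usage sketch (not part of the original module):
# config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
# config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}
# i.e. the per-stack depths are exposed through one dict-valued attribute.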
| 622 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N = len(arr)
        self.st = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()
    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def query(self, l: int, r: int) -> T:  # noqa: E741
        l, r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
    0: 7,
    1: 2,
    2: 6,
    3: -14,
    4: 5,
    5: 4,
    6: 7,
    7: -10,
    8: 9,
    9: 10,
    10: 12,
    11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments():
    """Test all possible segments against a brute-force reduce."""
    for i in range(len(test_array)):
        for j in range(i, len(test_array)):
            min_range = reduce(min, test_array[i : j + 1])
            max_range = reduce(max, test_array[i : j + 1])
            sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
            assert min_range == min_segment_tree.query(i, j)
            assert max_range == max_segment_tree.query(i, j)
            assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
a_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
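# Complexity sketch for the iterative segment tree above: build() is O(n),
# while update() and query() each walk at most one root-to-leaf path, i.e.
# O(log n). A tiny standalone example:
# st = SegmentTree([5, 1, 3], min)
# st.query(0, 2)  # -> 1
# st.update(1, 10)
# st.query(0, 2)  # -> 3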
| 622 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
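# Worked example for padding_tensor (illustrative values): right-padding two
# span lists to sequence_length=3 with the sentinel pair (-1, -1) gives
# padding_tensor([[(0, 1)], [(2, 3), (4, 5)]], (-1, -1), "right", 3)
#   -> [[[0, 1], [-1, -1], [-1, -1]], [[2, 3], [4, 5], [-1, -1]]]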
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
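# Usage sketch (hypothetical feature dict; real features come from the LUKE
# entity-aware tokenizer in the accompanying training script):
# collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
# batch = collator([{"input_ids": [...], "entity_ids": [...], "labels": [...],
#                    "ner_tags": [...], "original_entity_spans": [...]}])
# Every value comes back as a torch.int64 tensor padded to a common length.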
| 622 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFLayoutLMModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFLayoutLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def a__ ( ):
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__lowerCamelCase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__lowerCamelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
# test the sequence output on [0, :3, :3]
__lowerCamelCase = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
__lowerCamelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __UpperCAmelCase , atol=1E-3 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# initialize model with randomly initialized sequence classification head
__lowerCamelCase = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(
input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__lowerCamelCase = outputs.loss
__lowerCamelCase = (2,)
self.assertEqual(loss.shape , __UpperCAmelCase )
# test the shape of the logits
__lowerCamelCase = outputs.logits
__lowerCamelCase = (2, 2)
self.assertEqual(logits.shape , __UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# initialize model with randomly initialized token classification head
__lowerCamelCase = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(
input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
# test the shape of the logits
__lowerCamelCase = outputs.logits
__lowerCamelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , __UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# initialize model with randomly initialized token classification head
__lowerCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
# test the shape of the logits
__lowerCamelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , __UpperCAmelCase )
self.assertEqual(outputs.end_logits.shape , __UpperCAmelCase )
| 622 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = block_sizes
__lowerCamelCase = num_decoder_layers
__lowerCamelCase = d_model
__lowerCamelCase = n_head
__lowerCamelCase = d_head
__lowerCamelCase = d_inner
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = 2
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowerCamelCase = n_head
# Used in the tests to check the size of the first hidden state
__lowerCamelCase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowerCamelCase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowerCamelCase = self.num_hidden_layers + 2
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowerCamelCase = False
__lowerCamelCase = TFFunnelBaseModel(config=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForPreTraining(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForMaskedLM(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForSequenceClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFFunnelForMultipleChoice(config=__UpperCAmelCase )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFFunnelForTokenClassification(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = TFFunnelForQuestionAnswering(config=__UpperCAmelCase )
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
| 622 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
a_ = """
Human: <<task>>
Assistant: """
a_ = """huggingface-tools/default-prompts"""
a_ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Any="run" ):
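    # Resolve either an inline prompt or a dataset repo ID: literal prompts (anything
    # containing whitespace) are returned as-is, otherwise the prompt template file is
    # fetched from the hub repo and its contents returned.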
if prompt_or_repo_id is None:
__lowerCamelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' ,_UpperCamelCase ) is not None:
return prompt_or_repo_id
__lowerCamelCase = cached_file(
_UpperCamelCase ,PROMPT_FILES[mode] ,repo_type='''dataset''' ,user_agent={'''agent''': agent_name} )
with open(_UpperCamelCase ,'''r''' ,encoding='''utf-8''' ) as f:
return f.read()
| 622 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
a_ = namedtuple("""covid_data""", """cases deaths recovered""")
def a__ ( _UpperCamelCase : str = "https://www.worldometers.info/coronavirus/" ):
__lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(_UpperCamelCase ).content ).xpath(_UpperCamelCase ) )
a_ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 622 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """realm"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=128 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=8 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=256 , __UpperCAmelCase=10 , __UpperCAmelCase=1E-3 , __UpperCAmelCase=5 , __UpperCAmelCase=320 , __UpperCAmelCase=13353718 , __UpperCAmelCase=5000 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
# Common config
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = retriever_proj_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = num_candidates
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
# Reader config
__lowerCamelCase = span_hidden_size
__lowerCamelCase = max_span_width
__lowerCamelCase = reader_layer_norm_eps
__lowerCamelCase = reader_beam_size
__lowerCamelCase = reader_seq_len
# Retrieval config
__lowerCamelCase = num_block_records
__lowerCamelCase = searcher_beam_size
| 622 |
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str = " " ):
__lowerCamelCase = []
__lowerCamelCase = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
__lowerCamelCase = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 622 | 1 |
from __future__ import annotations
def a__ ( _UpperCamelCase : list[float] ):
if len(_UpperCamelCase ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
__lowerCamelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 622 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
            len(__UpperCAmelCase ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase ) # check for doc token related keys in dictionary.
| 622 | 1 |
import numpy as np
class __lowerCAmelCase :
def __init__( self ):
'''simple docstring'''
__lowerCamelCase = (0, 0)
__lowerCamelCase = None
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 0
def __eq__( self , __UpperCAmelCase ):
'''simple docstring'''
return self.position == cell.position
def lowerCamelCase ( self ):
'''simple docstring'''
print(self.position )
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase=(5, 5) ):
'''simple docstring'''
__lowerCamelCase = np.zeros(__UpperCAmelCase )
__lowerCamelCase = world_size[0]
__lowerCamelCase = world_size[1]
def lowerCamelCase ( self ):
'''simple docstring'''
print(self.w )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
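        # coordinate offsets of the 8 surrounding cells (Moore neighbourhood)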
__lowerCamelCase = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowerCamelCase = cell.position[0]
__lowerCamelCase = cell.position[1]
__lowerCamelCase = []
for n in neughbour_cord:
__lowerCamelCase = current_x + n[0]
__lowerCamelCase = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowerCamelCase = Cell()
__lowerCamelCase = (x, y)
__lowerCamelCase = cell
neighbours.append(__UpperCAmelCase )
return neighbours
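# A* search: repeatedly expand the open cell with the lowest f-score, move it to the
# closed list, and once the goal is reached walk the parent links back to rebuild the path.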
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : str ):
__lowerCamelCase = []
__lowerCamelCase = []
_open.append(_UpperCamelCase )
while _open:
__lowerCamelCase = np.argmin([n.f for n in _open] )
__lowerCamelCase = _open[min_f]
_closed.append(_open.pop(_UpperCamelCase ) )
if current == goal:
break
for n in world.get_neigbours(_UpperCamelCase ):
for c in _closed:
if c == n:
continue
__lowerCamelCase = current.g + 1
__lowerCamelCase ,__lowerCamelCase = n.position
__lowerCamelCase ,__lowerCamelCase = goal.position
__lowerCamelCase = (ya - ya) ** 2 + (xa - xa) ** 2
__lowerCamelCase = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_UpperCamelCase )
__lowerCamelCase = []
while current.parent is not None:
path.append(current.position )
__lowerCamelCase = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
a_ = Gridworld()
# Start position and goal
a_ = Cell()
a_ = (0, 0)
a_ = Cell()
a_ = (4, 4)
print(f"path from {start.position} to {goal.position}")
a_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
a_ = 1
print(world.w)
| 622 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 2E-3
| 622 | 1 |