code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCAmelCase_(default=None, metadata=None):
    """Build a dataclass ``field`` whose default is a (mutable) list-like value.

    Wrapping the default in ``default_factory`` avoids the shared-mutable-default
    pitfall for dataclass fields.

    :param default: value returned by the field's default factory.
    :param metadata: mapping stored as the field's metadata (e.g. ``{'help': ...}``).
    :return: a ``dataclasses.Field`` descriptor.
    """
    # NOTE(review): the obfuscated original declared two parameters with the same
    # name (a SyntaxError) and referenced `default` in the body; restored here.
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class _snake_case :
    """Command-line arguments for the benchmark plotting script.

    NOTE(review): every attribute was renamed to ``UpperCamelCase__`` and the
    defaults reference ``snake_case``/``list_field``, which are not bound in
    this chunk -- restore the original names before running.
    """

    # Path to the benchmark csv file (no default: required).
    UpperCamelCase__ = field(
        metadata={'help': 'The csv file to plot.'} , )
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Disable logarithmic scale when plotting'} , )
    UpperCamelCase__ = field(
        default=snake_case , metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        } , )
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
    UpperCamelCase__ = list_field(
        default=snake_case , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def lowerCAmelCase_ ( _snake_case : int ) -> str:
    """Report whether the given value can be parsed by ``int``."""
    try:
        int(_snake_case)
    except ValueError:
        return False
    return True
def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> Union[str, Any]:
    """Report whether the given value can be parsed by ``float``."""
    try:
        float(_snake_case)
    except ValueError:
        return False
    return True
class _snake_case :
    """Loads benchmark results from a csv file and plots them with matplotlib.

    NOTE(review): local names in this class were collapsed to ``__magic_name__``
    by obfuscation, so later references (``reader``, ``ax``, ``results``,
    ``title_str``, ...) are unbound -- restore the original bindings before use.
    """

    def __init__( self , _a ):
        # `_a` is the parsed argument namespace; `args` below is presumably it.
        __magic_name__ : Optional[Any] = args
        # Per-model accumulator: batch sizes, sequence lengths and a result map
        # keyed by (batch_size, sequence_length).
        __magic_name__ : List[str] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="" ) as csv_file:
            __magic_name__ : Optional[Any] = csv.DictReader(_a )
            for row in reader:
                __magic_name__ : Any = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
                if can_convert_to_int(row["result"] ):
                    # value is not None
                    __magic_name__ : List[Any] = int(row["result"] )
                elif can_convert_to_float(row["result"] ):
                    # value is not None
                    __magic_name__ : Dict = float(row["result"] )

    def SCREAMING_SNAKE_CASE ( self ):
        # Render one scatter/line series per model, either along batch size or
        # along sequence length depending on the parsed arguments.
        __magic_name__ , __magic_name__ : Tuple = plt.subplots()
        __magic_name__ : Optional[int] = "Time usage" if self.args.is_time else "Memory usage"
        __magic_name__ : int = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log" )
            ax.set_yscale("log" )
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            __magic_name__ : int = sorted(set(self.result_dict[model_name]["bsz"] ) )
            __magic_name__ : int = sorted(set(self.result_dict[model_name]["seq_len"] ) )
            __magic_name__ : List[Any] = self.result_dict[model_name]["result"]
            # Choose which dimension is on the x-axis and which is the series.
            ((__magic_name__) , (__magic_name__)) : str = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            __magic_name__ : str = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    __magic_name__ : List[Any] = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_a , )
                else:
                    # NOTE(review): `np.floataa` is a garbled dtype (likely np.float32).
                    __magic_name__ : Union[str, Any] = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                ((__magic_name__) , (__magic_name__)) : int = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                __magic_name__ : Dict = np.asarray(_a , _a )[: len(_a )]
                plt.scatter(
                    _a , _a , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(_a , _a , "--" )
                title_str += f''' {label_model_name} vs.'''
        # Drop the trailing " vs." from the accumulated title.
        __magic_name__ : Tuple = title_str[:-4]
        __magic_name__ : Dict = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(_a )
        plt.xlabel(_a )
        plt.ylabel(_a )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def lowerCAmelCase_ ( ) -> Optional[int]:
    """Entry point: parse PlotArguments from the command line and render the plot.

    NOTE(review): `parser` / `plot` below are unbound because locals were
    collapsed to `__magic_name__` by obfuscation.
    """
    __magic_name__ : Any = HfArgumentParser(_snake_case )
    __magic_name__ : List[str] = parser.parse_args_into_dataclasses()[0]
    __magic_name__ : Dict = Plot(args=_snake_case )
    plot.plot()
if __name__ == "__main__":
    # NOTE(review): the entry point was renamed to `lowerCAmelCase_` by the
    # obfuscation, so the original `main()` call raised NameError.
    lowerCAmelCase_()
| 281 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( _snake_case : List[Any] ) -> List[Any]:
    """Check that both the mock and bz2 protocols are registered with fsspec.

    NOTE(review): `_snake_case` is presumably the mock-fs pytest fixture whose
    presence registers "mock"; it is not used directly.
    """
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Tuple:
    """Check that the mock protocol is unregistered once its fixture is gone,
    while the built-in bz2 protocol remains available."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Union[str, Any]:
    """`extract_path_from_uri` strips the s3:// scheme and leaves local paths
    unchanged.

    NOTE(review): locals were collapsed to `__magic_name__`; `mock_bucket`,
    `dataset_path` and `new_dataset_path` are unbound as written.
    """
    __magic_name__ : Dict = "mock-s3-bucket"
    __magic_name__ : Any = F'''s3://{mock_bucket}'''
    __magic_name__ : str = extract_path_from_uri(_snake_case )
    assert dataset_path.startswith("s3://" ) is False
    __magic_name__ : Tuple = "./local/path"
    __magic_name__ : Optional[Any] = extract_path_from_uri(_snake_case )
    assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[Any]:
    """`is_remote_filesystem` is True for a remote fs (the fixture argument)
    and False for the local "file" filesystem."""
    __magic_name__ : str = is_remote_filesystem(_snake_case )
    assert is_remote is True
    __magic_name__ : Optional[int] = fsspec.filesystem("file" )
    __magic_name__ : int = is_remote_filesystem(_snake_case )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _snake_case )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any ) -> int:
    """For each compression filesystem (gzip/xz/zstd/bz2/lz4): open a compressed
    fixture file through fsspec and check its decompressed content matches the
    plain-text fixture.

    NOTE(review): duplicated `_snake_case` parameters make this a SyntaxError as
    written; originally these were the per-format fixture paths.
    """
    __magic_name__ : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    __magic_name__ : str = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Skip when the optional compression backend is not installed.
        __magic_name__ : Dict = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(_snake_case )
    __magic_name__ : str = fsspec.filesystem(compression_fs_class.protocol , fo=_snake_case )
    assert isinstance(_snake_case , _snake_case )
    # The exposed member name is the compressed file name without its extension.
    __magic_name__ : int = os.path.basename(_snake_case )
    __magic_name__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(_snake_case , "r" , encoding="utf-8" ) as f, open(_snake_case , encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) -> str:
    """A chained fsspec urlpath (`zip://member::archive`, `gzip://...`) resolves
    to a filesystem on which the member file exists."""
    __magic_name__ : int = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    __magic_name__ : int = compressed_file_paths[protocol]
    __magic_name__ : Tuple = "dataset.jsonl"
    __magic_name__ : List[str] = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
    __magic_name__ , *__magic_name__ : Optional[Any] = fsspec.get_fs_token_paths(_snake_case )
    assert fs.isfile(_snake_case )
    assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Tuple ) -> str:
    """HfFileSystem built from a Hub repo lists, stats and reads the expected
    files (requires network: marked as an integration test)."""
    __magic_name__ : int = hf_api.dataset_info(_snake_case , token=_snake_case )
    __magic_name__ : Optional[Any] = HfFileSystem(repo_info=_snake_case , token=_snake_case )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(_snake_case ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def lowerCAmelCase_ ( ) -> Optional[int]:
    """Re-registering an already-registered protocol ("bz2") and reloading the
    module emits exactly one overwrite warning.

    NOTE(review): `protocol` in the f-string below is unbound (locals were
    collapsed to `__magic_name__`).
    """
    __magic_name__ : Optional[Any] = "bz2"
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(_snake_case , _snake_case , clobber=_snake_case )
    with pytest.warns(_snake_case ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(_snake_case ) == 1
    assert (
        str(warning_info[0].message )
        == F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
| 281 | 1 |
import warnings

# Deprecation shim: re-export the pipeline and warn callers to import it
# directly from `diffusers` instead of using this script.
from diffusers import StableDiffusionImgaImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 281 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
# Map of reference ConvBERT checkpoints to their hosted config files.
# NOTE(review): both module constants share the obfuscated name `snake_case`,
# so the second assignment shadows the logger.
snake_case : List[Any] = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( snake_case ):
    """ConvBERT model configuration (model_type "convbert").

    NOTE(review): the `__init__` parameter list repeats `_a` (a SyntaxError as
    written); the names used in the body (`vocab_size`, `hidden_size`, ...)
    were the original parameters before obfuscation.
    """

    UpperCamelCase__ = 'convbert'

    def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a=768 , _a=2 , _a=9 , _a=1 , _a=None , **_a , ):
        super().__init__(
            pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
        # Copy the constructor arguments onto the config instance.
        __magic_name__ : Tuple = vocab_size
        __magic_name__ : List[Any] = hidden_size
        __magic_name__ : Union[str, Any] = num_hidden_layers
        __magic_name__ : List[Any] = num_attention_heads
        __magic_name__ : str = intermediate_size
        __magic_name__ : Any = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : Tuple = max_position_embeddings
        __magic_name__ : str = type_vocab_size
        __magic_name__ : List[str] = initializer_range
        __magic_name__ : Tuple = layer_norm_eps
        __magic_name__ : List[Any] = embedding_size
        __magic_name__ : List[Any] = head_ratio
        __magic_name__ : str = conv_kernel_size
        __magic_name__ : Dict = num_groups
        __magic_name__ : str = classifier_dropout
class _snake_case ( snake_case ):
    """ONNX export configuration for ConvBERT: declares the model inputs and
    their dynamic axes."""

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            __magic_name__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            __magic_name__ : Dict = {0: "batch", 1: "sequence"}
        # NOTE(review): `dynamic_axis` is presumably the dict assigned above
        # (locals were collapsed to `__magic_name__` by obfuscation).
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 281 | 1 |
from maths.prime_factors import prime_factors
def lowerCAmelCase_ ( _snake_case : int ) -> int:
    """Liouville lambda function: -1 if the number of prime factors (with
    multiplicity) is odd, 1 if it is even.

    :param _snake_case: a positive integer.
    :raises TypeError: if the input is not an integer.
    :raises ValueError: if the input is smaller than 1.
    """
    # NOTE(review): the original checked `isinstance(x, x)` (always wrong) and
    # referenced an undefined `number` in the message; both are fixed here.
    if not isinstance(_snake_case , int ):
        __magic_name__ : List[str] = F'''Input value of [number={_snake_case}] must be an integer'''
        raise TypeError(_snake_case if isinstance(_snake_case , str ) else __magic_name__ )
    if _snake_case < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(_snake_case ) ) % 2 else 1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 281 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ) -> str:
    """Download the demo 'merlion' image and return it as an RGB PIL image.

    NOTE(review): `image` and the url argument are unbound as written (locals
    collapsed to `__magic_name__`/`_snake_case` by obfuscation).
    """
    __magic_name__ : int = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    __magic_name__ : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert("RGB" )
    return image
def lowerCAmelCase_ ( config ):
    """Build the (source, destination) state-dict key rename table used when
    converting a LAVIS BLIP-2 checkpoint to the HF layout.

    :param config: object exposing ``config.vision_config.num_hidden_layers``.
    :return: list of ``(old_key, new_key)`` tuples.
    """
    # NOTE(review): the obfuscated original took `_snake_case` but referenced
    # `config` and `rename_keys` (NameError); restored here.
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def lowerCAmelCase_ ( dct , old , new ) -> None:
    """Move the value stored under key *old* to key *new* in *dct* (in place).

    :param dct: mutable mapping (typically a model state dict).
    :param old: key to remove.
    :param new: key under which the popped value is stored.
    """
    # NOTE(review): the obfuscated original declared three parameters with the
    # same name (a SyntaxError) and referenced unbound `dct`/`val`; restored.
    dct[new] = dct.pop(old)
def lowerCAmelCase_ ( state_dict , config ) -> None:
    """Fold the separate q/v biases of each vision attention block into a
    single qkv bias (with zeros for k, which LAVIS keeps bias-free).

    Mutates *state_dict* in place: pops ``...attn.q_bias`` / ``...attn.v_bias``
    and stores ``...attn.qkv.bias`` per layer.

    :param state_dict: original LAVIS state dict.
    :param config: object exposing ``config.vision_config.num_hidden_layers``.
    """
    # NOTE(review): the obfuscated original had duplicated parameter names and
    # unbound locals; restored to the standard BLIP-2 conversion logic.
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: [q, zeros (k has no bias), v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def lowerCAmelCase_ ( model_name , eos_token_id=None ):
    """Assemble a BLIP-2 config matching the given checkpoint name.

    :param model_name: one of the supported "blip2-..." checkpoint names; the
        vision image size is 364 for COCO-finetuned variants, else 224.
    :param eos_token_id: eos id forwarded to the OPT text config (callers pass
        this as a keyword).
    :return: ``(config, image_size)`` tuple.
    """
    # NOTE(review): the obfuscated original duplicated its parameter names (a
    # SyntaxError) and used unbound locals; restored here. If `model_name`
    # matches no branch, `text_config` stays unbound (preserved behavior).
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : str=None , _snake_case : Dict=False ) -> List[Any]:
    """Convert an original LAVIS BLIP-2 checkpoint to the HF format, verify the
    logits against the original model, and optionally save / push the result.

    NOTE(review): the parameter list repeats `_snake_case` (a SyntaxError as
    written) and locals were collapsed to `__magic_name__`, so many names below
    (`model_name`, `tokenizer`, `hf_model`, ...) are unbound -- restore the
    original bindings before running.
    """
    __magic_name__ : Optional[int] = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    __magic_name__ : List[Any] = tokenizer("\n" , add_special_tokens=_snake_case ).input_ids[0]
    __magic_name__ , __magic_name__ : Tuple = get_blipa_config(_snake_case , eos_token_id=_snake_case )
    __magic_name__ : Union[str, Any] = BlipaForConditionalGeneration(_snake_case ).eval()
    # Map HF model names to the (LAVIS name, model_type) pair to load.
    __magic_name__ : Any = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    __magic_name__ , __magic_name__ : Union[str, Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    __magic_name__ : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
    __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = load_model_and_preprocess(
        name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    __magic_name__ : Dict = original_model.state_dict()
    __magic_name__ : str = create_rename_keys(_snake_case )
    for src, dest in rename_keys:
        rename_key(_snake_case , _snake_case , _snake_case )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        __magic_name__ : Any = state_dict.pop(_snake_case )
        if key.startswith("Qformer.bert" ):
            __magic_name__ : Optional[int] = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            __magic_name__ : Any = key.replace("self" , "attention" )
        if "opt_proj" in key:
            __magic_name__ : Union[str, Any] = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            __magic_name__ : Optional[int] = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            __magic_name__ : List[str] = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            __magic_name__ : Tuple = key.replace("t5" , "language" )
        __magic_name__ : Dict = val
    # read in qv biases
    read_in_q_v_bias(_snake_case , _snake_case )
    __magic_name__ , __magic_name__ : Tuple = hf_model.load_state_dict(_snake_case , strict=_snake_case )
    assert len(_snake_case ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    __magic_name__ : List[Any] = load_demo_image()
    __magic_name__ : Tuple = vis_processors["eval"](_snake_case ).unsqueeze(0 ).to(_snake_case )
    __magic_name__ : Dict = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(_snake_case )
    # create processor
    __magic_name__ : Optional[Any] = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_snake_case , image_std=_snake_case )
    __magic_name__ : Dict = BlipaProcessor(image_processor=_snake_case , tokenizer=_snake_case )
    __magic_name__ : Union[str, Any] = processor(images=_snake_case , return_tensors="pt" ).pixel_values.to(_snake_case )
    # make sure processor creates exact same pixel values
    assert torch.allclose(_snake_case , _snake_case )
    original_model.to(_snake_case )
    hf_model.to(_snake_case )
    with torch.no_grad():
        if "opt" in model_name:
            __magic_name__ : List[Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            __magic_name__ : Optional[int] = hf_model(_snake_case , _snake_case ).logits
        else:
            __magic_name__ : int = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            __magic_name__ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            __magic_name__ : List[str] = hf_model(_snake_case , _snake_case , labels=_snake_case ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        __magic_name__ : List[str] = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case )
        assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        __magic_name__ : Tuple = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case )
    else:
        # cast to same type
        __magic_name__ : str = logits.dtype
        assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    __magic_name__ : Optional[int] = ""
    __magic_name__ : Dict = tokenizer(_snake_case , return_tensors="pt" ).input_ids.to(_snake_case )
    __magic_name__ : int = original_model.generate({"image": original_pixel_values} )
    __magic_name__ : Optional[Any] = hf_model.generate(
        _snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , _snake_case )
    __magic_name__ : Tuple = input_ids.shape[1]
    __magic_name__ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case )
    __magic_name__ : Union[str, Any] = [text.strip() for text in output_text]
    print("HF generation:" , _snake_case )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_snake_case )
        hf_model.save_pretrained(_snake_case )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # NOTE(review): `parser`, `choices`, `args` and `convert_blipa_checkpoint`
    # are unbound as written (names collapsed/renamed by obfuscation).
    snake_case : Any = argparse.ArgumentParser()
    # Supported BLIP-2 checkpoint names.
    snake_case : Union[str, Any] = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    snake_case : int = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _snake_case ( snake_case ):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    NOTE(review): the `__init__` parameter list repeats `_a` (a SyntaxError as
    written); the originals were (replacement, add_prefix_space, unk_token,
    eos_token, pad_token). Locals are collapsed to `__magic_name__`.
    """

    def __init__( self , _a = "▁" , _a = True , _a = "<unk>" , _a = "</s>" , _a = "<pad>" , ):
        # Fixed ids for the special tokens.
        __magic_name__ : int = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # Flat list of special tokens ordered by id.
        __magic_name__ : Tuple = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            __magic_name__ : Dict = token_dict["token"]
        __magic_name__ : Dict = Tokenizer(Unigram() )
        # Normalization: NMT + NFKC, squeeze repeated spaces, lowercase.
        __magic_name__ : Tuple = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ) , " " ),
                normalizers.Lowercase(),
            ] )
        __magic_name__ : Optional[Any] = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=_a , add_prefix_space=_a ),
                pre_tokenizers.Digits(individual_digits=_a ),
                pre_tokenizers.Punctuation(),
            ] )
        __magic_name__ : List[Any] = decoders.Metaspace(replacement=_a , add_prefix_space=_a )
        # Append </s> to every single sequence.
        __magic_name__ : List[str] = TemplateProcessing(
            single=f'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
        __magic_name__ : str = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(_a , _a )

    def SCREAMING_SNAKE_CASE ( self , _a , _a = 8_000 , _a = True , ):
        # Train the Unigram model from a list of files (a single path is wrapped).
        __magic_name__ : Tuple = trainers.UnigramTrainer(
            vocab_size=_a , special_tokens=self.special_tokens_list , show_progress=_a , )
        if isinstance(_a , _a ):
            __magic_name__ : Any = [files]
        self._tokenizer.train(_a , trainer=_a )
        self.add_unk_id()

    def SCREAMING_SNAKE_CASE ( self , _a , _a = 8_000 , _a = True , ):
        # Train the Unigram model from an iterator of text batches.
        __magic_name__ : str = trainers.UnigramTrainer(
            vocab_size=_a , special_tokens=self.special_tokens_list , show_progress=_a , )
        self._tokenizer.train_from_iterator(_a , trainer=_a )
        self.add_unk_id()

    def SCREAMING_SNAKE_CASE ( self ):
        # Patch the serialized tokenizer so its unk id matches our <unk> token.
        __magic_name__ : str = json.loads(self._tokenizer.to_str() )
        __magic_name__ : Optional[int] = self.special_tokens["unk"]["id"]
        __magic_name__ : str = Tokenizer.from_str(json.dumps(_a ) )
| 281 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
# Expected vocabulary/merges file names for PhoBERT checkpoints.
# NOTE(review): all four module constants share the obfuscated name
# `snake_case`, so each assignment shadows the previous one.
snake_case : Union[str, Any] = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
# Hosted vocab/merges files per reference checkpoint.
snake_case : Dict = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
# Maximum input lengths (positional embedding sizes) per checkpoint.
snake_case : Union[str, Any] = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( _snake_case : str ) -> Union[str, Any]:
    """Return the set of adjacent symbol pairs in a word.

    :param _snake_case: word represented as a sequence of symbols
        (variable-length strings).
    :return: set of ``(previous_symbol, symbol)`` tuples.
    """
    # NOTE(review): the obfuscated original collapsed all locals to
    # `__magic_name__`, leaving `pairs` unbound (NameError); restored here.
    pairs = set()
    prev_char = _snake_case[0]
    for char in _snake_case[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _snake_case ( snake_case ):
    """PhoBERT tokenizer: fastBPE-based tokenization for Vietnamese.

    NOTE(review): obfuscation duplicated several `_a` parameter names (a
    SyntaxError as written) and collapsed locals to `__magic_name__`, leaving
    many names (`vocab_file`, `word`, `pairs`, `f`, ...) unbound -- restore
    the original bindings before use.
    """

    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
        super().__init__(
            bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
        __magic_name__ : Dict = vocab_file
        __magic_name__ : Tuple = merges_file
        # Seed the encoder with the four special-token ids (0..3).
        __magic_name__ : List[Any] = {}
        __magic_name__ : List[Any] = 0
        __magic_name__ : Tuple = 1
        __magic_name__ : int = 2
        __magic_name__ : Union[str, Any] = 3
        self.add_from_file(_a )
        __magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
        # Parse the merges file into a merge-priority (bpe_ranks) table.
        with open(_a , encoding="utf-8" ) as merges_handle:
            __magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
        __magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        __magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
        __magic_name__ : Optional[int] = {}

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # Build model inputs with special tokens: <s> A </s> (</s> B </s>).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ : Optional[Any] = [self.cls_token_id]
        __magic_name__ : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is None:
            return [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # PhoBERT does not use token type ids: return all zeros.
        __magic_name__ : Optional[Any] = [self.sep_token_id]
        __magic_name__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # Vocabulary size.
        return len(self.encoder )

    def SCREAMING_SNAKE_CASE ( self ):
        # Full vocabulary including added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Byte-pair-encode a single token, with per-token caching.
        if token in self.cache:
            return self.cache[token]
        __magic_name__ : List[Any] = tuple(_a )
        __magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        __magic_name__ : Any = get_pairs(_a )
        if not pairs:
            return token
        while True:
            # Repeatedly merge the lowest-ranked bigram until none remains.
            __magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __magic_name__ , __magic_name__ : List[str] = bigram
            __magic_name__ : List[str] = []
            __magic_name__ : List[str] = 0
            while i < len(_a ):
                try:
                    __magic_name__ : Any = word.index(_a , _a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __magic_name__ : Tuple = j
                if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __magic_name__ : Union[str, Any] = tuple(_a )
            __magic_name__ : Optional[int] = new_word
            if len(_a ) == 1:
                break
            else:
                __magic_name__ : List[Any] = get_pairs(_a )
        # Join sub-words with the "@@ " continuation marker and drop "</w>".
        __magic_name__ : Optional[int] = "@@ ".join(_a )
        __magic_name__ : Tuple = word[:-4]
        __magic_name__ : str = word
        return word

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Tokenize text: whitespace split, then BPE each piece.
        __magic_name__ : Optional[Any] = []
        __magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
        for token in words:
            split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
        return split_tokens

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # token -> id (falls back to the unk token's id).
        return self.encoder.get(_a , self.encoder.get(self.unk_token ) )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # id -> token (falls back to the unk token).
        return self.decoder.get(_a , self.unk_token )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Join tokens and undo the "@@ " BPE continuation markers.
        __magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
        return out_string

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # Copy the vocab and merges files into `save_directory`.
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : Optional[int] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __magic_name__ : Union[str, Any] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
            copyfile(self.merges_file , _a )
        return out_vocab_file, out_merge_file

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Populate the encoder from a fastBPE dictionary ("<token> <count>" lines);
        # accepts either a path (recurses on the opened handle) or a file object.
        if isinstance(_a , _a ):
            try:
                with open(_a , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(_a )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        __magic_name__ : List[Any] = f.readlines()
        for lineTmp in lines:
            __magic_name__ : Optional[Any] = lineTmp.strip()
            __magic_name__ : Union[str, Any] = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            __magic_name__ : Optional[int] = line[:idx]
            __magic_name__ : Dict = len(self.encoder )
| 281 | 1 |
def lowerCAmelCase_ ( input_string : str , pattern : str ) -> bool:
    '''Regular-expression matching supporting "." (any single char) and "*"
    (zero or more of the preceding element), via bottom-up dynamic programming.

    dp[i][j] is 1 iff the first i chars of input_string match the first j
    chars of pattern.

    NOTE(review): the original declared both parameters as `_snake_case`
    (a SyntaxError) and collapsed every local into `__magic_name__`, leaving
    `dp`/`input_string`/`pattern` undefined; names restored from the body.
    '''
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # NOTE(review): the matcher above is defined as `lowerCAmelCase_`, and the
    # original bound both literals to the same name and then referenced the
    # undefined `match_pattern`/`input_string`/`pattern` — all raised NameError.
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(F"{input_string} matches the given pattern {pattern}")
    else:
        print(F"{input_string} does not match with the given pattern {pattern}")
| 281 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( _snake_case : str = "laptop" ) -> DataFrame:
    '''Scrape an Amazon.in search-results page for the given product query and
    collect title/link/price/rating/MRP/discount rows into a pandas DataFrame.

    NOTE(review): local names in this function were collapsed into
    `__magic_name__` by an automated rename, so later lines read undefined
    names (`product`, `soup`, `product_mrp`, `product_price`, `data_frame`,
    ...) and `item.ha` looks like a mangled `item.h2` — the function does not
    run as written; the comments below describe the apparent intent only.
    '''
    # intended: the search URL for the query (references the renamed parameter)
    __magic_name__ : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
    # browser-like headers so Amazon serves the normal HTML page
    __magic_name__ : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    # NOTE(review): passes the same renamed name for both url and headers;
    # also no explicit parser is given to BeautifulSoup (emits a warning).
    __magic_name__ : Tuple = BeautifulSoup(requests.get(_snake_case , headers=_snake_case ).text )
    # Initialize a Pandas dataframe with the column titles
    __magic_name__ : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            # title and link come from the result card's heading element
            __magic_name__ : Dict = item.ha.text
            __magic_name__ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
            __magic_name__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                __magic_name__ : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # no rating element on this card
                __magic_name__ : Dict = "Not available"
            try:
                __magic_name__ : Optional[int] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                # no MRP (strike-through price) element on this card
                __magic_name__ : List[str] = ""
            try:
                # intended: discount percentage = (MRP - price) / MRP * 100
                __magic_name__ : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                __magic_name__ : str = float("nan" )
        except AttributeError:
            pass
        # intended: append the scraped row to the dataframe, then reset the
        # price/MRP placeholders and advance the index
        __magic_name__ : Optional[int] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __magic_name__ : Optional[Any] = " "
        __magic_name__ : str = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # NOTE(review): the scraper above is defined as `lowerCAmelCase_`; the
    # original called the undefined names `get_amazon_product_data` and
    # `product` (the literal was bound to `snake_case`).
    product = "headphones"
    lowerCAmelCase_(product).to_csv(F"Amazon Product Data for {product}.csv")
| 281 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
snake_case : List[str] = True
from torch.cuda.amp import autocast
snake_case : int = logging.getLogger(__name__)
def lowerCAmelCase_ ( default=None , metadata=None ):
    '''Dataclass helper: a `field` whose default is produced fresh per instance.

    Wrapping `default` in a lambda `default_factory` avoids the shared mutable
    default problem for list-valued fields.

    NOTE(review): the original declared both parameters as `_snake_case`
    (a SyntaxError: duplicate argument name); names restored from the body.
    '''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class _snake_case :
    """Model-side command-line arguments for Wav2Vec2 fine-tuning.

    NOTE(review): every field name was obfuscated to `UpperCamelCase__`, so
    each declaration clobbers the previous one and the `default=snake_case`
    sentinel refers to an external name — the dataclass is unusable as
    written; the comments record the per-field intent from the help strings.
    """

    # pretrained model path / hub id (required)
    UpperCamelCase__ = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    # cache directory for downloaded models
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    # whether to freeze the feature-extractor layers
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    # attention dropout ratio
    UpperCamelCase__ = field(
        default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
    # activation dropout ratio
    UpperCamelCase__ = field(
        default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
    # hidden-layer dropout probability
    UpperCamelCase__ = field(
        default=0.1 , metadata={
            'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
        } , )
    # feature-projection dropout probability
    UpperCamelCase__ = field(
        default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
    # SpecAugment time-mask probability
    UpperCamelCase__ = field(
        default=0.05 , metadata={
            'help': (
                'Propability of each feature vector along the time axis to be chosen as the start of the vector'
                'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
                'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
            )
        } , )
    # LayerDrop probability
    UpperCamelCase__ = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class _snake_case :
    """Data-side command-line arguments for Wav2Vec2 fine-tuning.

    NOTE(review): all field names were obfuscated to `UpperCamelCase__`
    (each declaration clobbers the last) and `default=snake_case` refers to
    an external sentinel — see the help strings for per-field intent.
    """

    # dataset configuration name (language) for the Common Voice dataset
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    # training split specification
    UpperCamelCase__ = field(
        default='train+validation' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    # whether to overwrite cached preprocessed datasets
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    # number of preprocessing worker processes
    UpperCamelCase__ = field(
        default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    # optional cap on training examples (debugging)
    UpperCamelCase__ = field(
        default=snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    # optional cap on validation examples (debugging)
    UpperCamelCase__ = field(
        default=snake_case , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of validation examples to this '
                'value if set.'
            )
        } , )
    # punctuation characters stripped from transcripts before training
    UpperCamelCase__ = list_field(
        default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class _snake_case :
    """Data collator that dynamically pads Wav2Vec2 inputs and CTC labels.

    NOTE(review): the original obfuscated block declared every field as
    ``UpperCamelCase__`` (later declarations clobbered earlier ones), padded
    the raw feature dicts instead of the extracted input features, and never
    stored the padded labels on the batch. Field names are restored from the
    attributes ``__call__`` actually reads (self.processor, self.padding, ...).
    """

    # processor bundling the feature extractor (inputs) and tokenizer (labels)
    processor: Any
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__( self , _a ):
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in _a]
        label_features = [{"input_ids": feature["labels"]} for feature in _a]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch["labels"] = labels
        return batch
class _snake_case ( snake_case ):
    """Trainer subclass whose training step handles AMP/Apex/DeepSpeed loss
    scaling and multi-GPU CTC-loss reduction for Wav2Vec2 fine-tuning."""

    def SCREAMING_SNAKE_CASE ( self , model , inputs ):
        """Run one training step on `model` with `inputs` and return the
        detached loss tensor.

        NOTE(review): the original declared both parameters as `_a`
        (a SyntaxError) and bound `loss`/`inputs` to throwaway names while
        reading them back; the names the body uses are restored.
        """
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # normalize the summed loss by the number of real (non-padded) labels
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        # backward pass via whichever mixed-precision / distributed backend is active
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def lowerCAmelCase_ ( ) -> Optional[Any]:
    '''End-to-end Wav2Vec2 CTC fine-tuning pipeline on Common Voice:
    argument parsing, checkpoint detection, logging setup, dataset loading,
    transcript cleaning, vocabulary building, tokenizer/processor/model
    construction, audio resampling and feature extraction, training and
    evaluation.

    NOTE(review): an automated rename collapsed every local into
    `__magic_name__`, so later references (`parser`, `train_dataset`,
    `vocab_dict`, `processor`, `trainer`, ...) are undefined as written —
    the function does not run; comments describe the apparent intent.
    '''
    # intended: parse (ModelArguments, DataTrainingArguments, TrainingArguments)
    __magic_name__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __magic_name__ , __magic_name__ , __magic_name__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __magic_name__ , __magic_name__ , __magic_name__ : Dict = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    __magic_name__ : Dict = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __magic_name__ : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s" , _snake_case )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets:
    __magic_name__ : Dict = datasets.load_dataset(
        "common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
    __magic_name__ : Optional[int] = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
    # Create and save tokenizer
    # intended: a regex character class of all characters to strip from transcripts
    __magic_name__ : Union[str, Any] = F'''[{"".join(data_args.chars_to_ignore )}]'''
    def remove_special_characters(_snake_case : Union[str, Any] ):
        """Strip ignored characters from a transcript, lowercase it, and append a space."""
        __magic_name__ : List[Any] = re.sub(_snake_case , "" , batch["sentence"] ).lower() + " "
        return batch
    __magic_name__ : int = train_dataset.map(_snake_case , remove_columns=["sentence"] )
    __magic_name__ : Tuple = eval_dataset.map(_snake_case , remove_columns=["sentence"] )
    def extract_all_chars(_snake_case : Union[str, Any] ):
        """Collect the set of characters appearing in a batch of transcripts."""
        __magic_name__ : str = " ".join(batch["text"] )
        __magic_name__ : Union[str, Any] = list(set(_snake_case ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    __magic_name__ : Optional[Any] = train_dataset.map(
        _snake_case , batched=_snake_case , batch_size=-1 , keep_in_memory=_snake_case , remove_columns=train_dataset.column_names , )
    __magic_name__ : Optional[int] = train_dataset.map(
        _snake_case , batched=_snake_case , batch_size=-1 , keep_in_memory=_snake_case , remove_columns=eval_dataset.column_names , )
    # intended: union of the character sets from train and test transcripts
    __magic_name__ : List[Any] = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
    __magic_name__ : Dict = {v: k for k, v in enumerate(_snake_case )}
    # intended: replace the space character with the "|" word-delimiter token
    __magic_name__ : Optional[int] = vocab_dict[" "]
    del vocab_dict[" "]
    __magic_name__ : str = len(_snake_case )
    __magic_name__ : Any = len(_snake_case )
    with open("vocab.json" , "w" ) as vocab_file:
        json.dump(_snake_case , _snake_case )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __magic_name__ : List[str] = WavaVecaCTCTokenizer(
        "vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
    __magic_name__ : Optional[Any] = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=_snake_case , return_attention_mask=_snake_case )
    __magic_name__ : Optional[Any] = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
    __magic_name__ : Tuple = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        __magic_name__ : Optional[int] = min(len(_snake_case ) , data_args.max_train_samples )
        __magic_name__ : str = train_dataset.select(range(_snake_case ) )
    if data_args.max_val_samples is not None:
        __magic_name__ : Any = eval_dataset.select(range(data_args.max_val_samples ) )
    # Common Voice audio is 48 kHz; Wav2Vec2 expects 16 kHz
    __magic_name__ : Any = torchaudio.transforms.Resample(48000 , 16000 )
    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(_snake_case : List[str] ):
        """Load one audio file, resample to 16 kHz, and keep its transcript."""
        __magic_name__ , __magic_name__ : str = torchaudio.load(batch["path"] )
        __magic_name__ : Optional[int] = resampler(_snake_case ).squeeze().numpy()
        __magic_name__ : Tuple = 16000
        __magic_name__ : int = batch["text"]
        return batch
    __magic_name__ : List[str] = train_dataset.map(
        _snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    __magic_name__ : str = eval_dataset.map(
        _snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(_snake_case : Any ):
        """Extract input features and tokenize target text for one batch."""
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"] ) ) == 1
        ), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
        __magic_name__ : int = processor(
            audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
        batch.update(_snake_case )
        return batch
    __magic_name__ : Optional[int] = train_dataset.map(
        _snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , )
    __magic_name__ : Optional[Any] = eval_dataset.map(
        _snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    __magic_name__ : Union[str, Any] = datasets.load_metric("wer" )
    def compute_metrics(_snake_case : List[Any] ):
        """Greedy-decode predictions and score them with word error rate."""
        __magic_name__ : Optional[Any] = pred.predictions
        __magic_name__ : Any = np.argmax(_snake_case , axis=-1 )
        # intended: replace -100 label padding with the pad token id before decoding
        __magic_name__ : str = processor.tokenizer.pad_token_id
        __magic_name__ : str = processor.batch_decode(_snake_case )
        # we do not want to group tokens when computing the metrics
        __magic_name__ : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=_snake_case )
        __magic_name__ : List[str] = wer_metric.compute(predictions=_snake_case , references=_snake_case )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    __magic_name__ : int = DataCollatorCTCWithPadding(processor=_snake_case , padding=_snake_case )
    # Initialize our Trainer
    __magic_name__ : List[str] = CTCTrainer(
        model=_snake_case , data_collator=_snake_case , args=_snake_case , compute_metrics=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            __magic_name__ : Any = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            __magic_name__ : Union[str, Any] = model_args.model_name_or_path
        else:
            __magic_name__ : List[Any] = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        __magic_name__ : Any = trainer.train(resume_from_checkpoint=_snake_case )
        trainer.save_model()
        __magic_name__ : Union[str, Any] = train_result.metrics
        __magic_name__ : Union[str, Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case )
        )
        __magic_name__ : Dict = min(_snake_case , len(_snake_case ) )
        trainer.log_metrics("train" , _snake_case )
        trainer.save_metrics("train" , _snake_case )
        trainer.save_state()
    # Evaluation
    __magic_name__ : Optional[Any] = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        __magic_name__ : Tuple = trainer.evaluate()
        __magic_name__ : Any = data_args.max_val_samples if data_args.max_val_samples is not None else len(_snake_case )
        __magic_name__ : List[Any] = min(_snake_case , len(_snake_case ) )
        trainer.log_metrics("eval" , _snake_case )
        trainer.save_metrics("eval" , _snake_case )
    return results
if __name__ == "__main__":
    # NOTE(review): the training entry point above is defined as
    # `lowerCAmelCase_`; the original called the undefined name `main`.
    lowerCAmelCase_()
from __future__ import annotations
class _snake_case :
def __init__( self , _a ):
__magic_name__ : Optional[Any] = data
__magic_name__ : Node | None = None
__magic_name__ : Node | None = None
def lowerCAmelCase_ ( _snake_case : Node | None ) -> None:  # In Order traversal of the tree
    '''Print the tree's values in-order (left subtree, node, right subtree).

    NOTE(review): the original recursed through the undefined name `display`
    and read the undefined `tree`; fixed to the names actually defined.
    '''
    if _snake_case:
        lowerCAmelCase_(_snake_case.left )
        print(_snake_case.data )
        lowerCAmelCase_(_snake_case.right )
def lowerCAmelCase_ ( _snake_case : Node | None ) -> int:
    '''Return the depth (number of levels) of the tree; 0 for an empty tree.

    NOTE(review): the original recursed through the undefined names
    `depth_of_tree` and `tree`; fixed to the names actually defined.
    '''
    return 1 + max(lowerCAmelCase_(_snake_case.left ) , lowerCAmelCase_(_snake_case.right ) ) if _snake_case else 0
def lowerCAmelCase_ ( _snake_case : Node ) -> bool:
    '''Return True if every node in the tree has either zero or two children.

    NOTE(review): the original referenced the undefined names
    `is_full_binary_tree` and `tree`; fixed to the names actually defined.
    '''
    if not _snake_case:
        return True
    if _snake_case.left and _snake_case.right:
        return lowerCAmelCase_(_snake_case.left ) and lowerCAmelCase_(_snake_case.right )
    else:
        return not _snake_case.left and not _snake_case.right
def lowerCAmelCase_ ( ) -> None:  # Main function for testing.
    '''Build a sample tree and demonstrate the traversal helpers.

    NOTE(review): automated renaming bound every node to the throwaway
    `__magic_name__` (no tree is actually linked together) and the calls
    reference the undefined names `Node`, `is_full_binary_tree`,
    `depth_of_tree` and `display` — this demo does not run as written.
    '''
    # intended: construct nodes 1..9 and link them into a tree rooted at 1
    __magic_name__ : int = Node(1 )
    __magic_name__ : Union[str, Any] = Node(2 )
    __magic_name__ : Tuple = Node(3 )
    __magic_name__ : Optional[Any] = Node(4 )
    __magic_name__ : Union[str, Any] = Node(5 )
    __magic_name__ : Any = Node(6 )
    __magic_name__ : int = Node(7 )
    __magic_name__ : List[str] = Node(8 )
    __magic_name__ : Union[str, Any] = Node(9 )
    print(is_full_binary_tree(_snake_case ) )
    print(depth_of_tree(_snake_case ) )
    print("Tree is: " )
    display(_snake_case )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this file — the demo entry point
    # directly above is defined as `lowerCAmelCase_`; call that instead.
    lowerCAmelCase_()
| 281 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _snake_case ( snake_case ):
    """Slow integration test: fine-tunes a tiny BERT-to-BERT encoder-decoder
    on a 1% slice of CNN/DailyMail with Seq2SeqTrainer.

    NOTE(review): locals were renamed to `__magic_name__`, so later references
    (`bertabert`, `tokenizer`, `train_dataset`, `val_dataset`, `batch_size`,
    `output_dir`, `training_args`, `trainer`, ...) are undefined as written —
    the test does not run; comments describe intent only.
    """

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ):
        # intended: tiny encoder-decoder model + bert tokenizer
        __magic_name__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        __magic_name__ : Dict = BertTokenizer.from_pretrained("bert-base-uncased" )
        __magic_name__ : Optional[Any] = bertabert.config.encoder.vocab_size
        __magic_name__ : int = tokenizer.sep_token_id
        __magic_name__ : Any = tokenizer.cls_token_id
        __magic_name__ : Tuple = 128
        # intended: small train/validation slices of cnn_dailymail
        __magic_name__ : Dict = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        __magic_name__ : List[str] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        __magic_name__ : int = train_dataset.select(range(32 ) )
        __magic_name__ : int = val_dataset.select(range(16 ) )
        __magic_name__ : Dict = 4
        def _map_to_encoder_decoder_inputs(_a ):
            """Tokenize articles (inputs) and highlights (labels) for one batch."""
            # Tokenizer will automatically set [BOS] <text> [EOS]
            __magic_name__ : Optional[int] = tokenizer(batch["article"] , padding="max_length" , truncation=_a , max_length=512 )
            __magic_name__ : Union[str, Any] = tokenizer(batch["highlights"] , padding="max_length" , truncation=_a , max_length=128 )
            __magic_name__ : Union[str, Any] = inputs.input_ids
            __magic_name__ : List[Any] = inputs.attention_mask
            __magic_name__ : List[Any] = outputs.input_ids
            __magic_name__ : List[Any] = outputs.input_ids.copy()
            # intended: mask pad tokens with -100 so they are ignored by the loss
            __magic_name__ : List[Any] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            __magic_name__ : int = outputs.attention_mask
            assert all(len(_a ) == 512 for x in inputs.input_ids )
            assert all(len(_a ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(_a ):
            """Token-level exact-match accuracy between predictions and labels."""
            __magic_name__ : List[Any] = pred.label_ids
            __magic_name__ : List[Any] = pred.predictions
            # all unnecessary tokens are removed
            __magic_name__ : Any = tokenizer.batch_decode(_a , skip_special_tokens=_a )
            __magic_name__ : Any = tokenizer.batch_decode(_a , skip_special_tokens=_a )
            __magic_name__ : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_a ) )] ) / len(_a )
            return {"accuracy": accuracy}
        # map train dataset
        __magic_name__ : Tuple = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        __magic_name__ : Union[str, Any] = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        __magic_name__ : List[Any] = self.get_auto_remove_tmp_dir()
        __magic_name__ : str = SeqaSeqTrainingArguments(
            output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy="steps" , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        __magic_name__ : List[Any] = SeqaSeqTrainer(
            model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , )
        # start training
        trainer.train()
| 281 |
def lowerCAmelCase_ ( input_string : str , pattern : str ) -> bool:
    '''Regular-expression matching supporting "." (any single char) and "*"
    (zero or more of the preceding element), via bottom-up dynamic programming.

    dp[i][j] is 1 iff the first i chars of input_string match the first j
    chars of pattern.

    NOTE(review): the original declared both parameters as `_snake_case`
    (a SyntaxError) and collapsed every local into `__magic_name__`, leaving
    `dp`/`input_string`/`pattern` undefined; names restored from the body.
    '''
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # NOTE(review): the matcher above is defined as `lowerCAmelCase_`, and the
    # original bound both literals to the same name and then referenced the
    # undefined `match_pattern`/`input_string`/`pattern` — all raised NameError.
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(F"{input_string} matches the given pattern {pattern}")
    else:
        print(F"{input_string} does not match with the given pattern {pattern}")
| 281 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _snake_case ( snake_case ):
    # NOTE(review): field names were obfuscated to `UpperCamelCase__` (the
    # second declaration clobbers the first) and the annotations to `42`, so
    # only one attribute survives — presumably the pipeline's generated images
    # and per-image NSFW flags; confirm against the upstream output class.
    UpperCamelCase__ = 42
    UpperCamelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class _snake_case ( snake_case ):
        # NOTE(review): obfuscated field declarations — both share one name,
        # so only a single attribute survives; presumably the generated images
        # and NSFW flags of the upstream Flax pipeline output. TODO confirm.
        UpperCamelCase__ = 42
        UpperCamelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 281 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    # Pillow is not installed: provide a do-nothing stub so the module can still
    # be imported (the vision-dependent tests are skipped in that case).
    # NOTE(review): the stub is named ``_snake_case`` rather than ``Image``, so
    # annotations referring to ``Image`` below will still fail — TODO confirm
    # the intended stub name.
    class _snake_case :
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_a , **_a ):
            """No-op placeholder accepting any arguments."""
            pass
def lowerCAmelCase_ ( _snake_case : "Image" ) -> str:
    """Return a short fingerprint of an image: the first 10 hex characters of
    the MD5 digest of its raw bytes.

    The annotation is a string on purpose: ``Image`` is only imported when the
    vision extras are available.
    """
    # Bug fixes vs. previous revision: ``hashlib.mda`` is not a real function
    # (MD5 is ``hashlib.md5``), and the digest object was never bound to the
    # name ``m`` that was returned.
    m = hashlib.md5(_snake_case.tobytes() )
    return m.hexdigest()[:10]
def lowerCAmelCase_ ( _snake_case : "Image" ) -> Dict:
    """Summarize a mask for test comparison: a short content hash plus the
    shape of the mask as a numpy array.

    Bug fix vs. previous revision: ``npimg`` and ``shape`` were never bound
    (both assignments went to a throwaway name), so the function raised
    NameError.
    """
    npimg = np.array(_snake_case )
    shape = npimg.shape
    # ``hashimage`` is the image-hashing helper defined earlier in this module.
    return {"hash": hashimage(_snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for the ``mask-generation`` task (SAM models).

    The slow tests download ``facebook/sam-vit-huge`` and compare mask hashes
    and IoU scores against recorded reference values.
    """

    # NOTE(review): both mappings are bound to a single name, so the second
    # assignment shadows the first (originals: model_mapping / tf_model_mapping).
    UpperCamelCase__ = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    UpperCamelCase__ = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    # NOTE(review): the methods below all share one name, so later definitions
    # shadow earlier ones; names kept for backward compatibility.
    def SCREAMING_SNAKE_CASE ( self , model , tokenizer , processor ):
        """Build the pipeline under test together with two example image paths.

        Bug fix: the method previously declared three parameters all named
        ``_a`` (a SyntaxError) and returned an unbound ``image_segmenter``.
        """
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def SCREAMING_SNAKE_CASE ( self , image_segmenter , examples ):
        """Generic pipeline checks are a no-op; dedicated slow tests follow."""
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def SCREAMING_SNAKE_CASE ( self ):
        pass

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ):
        """End-to-end mask generation on a COCO image, checked via mask hashes."""
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing: full masks are too big to compare directly.
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ] , )
        # fmt: on

    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Same pipeline with ``pred_iou_thresh=1``: only top-scoring masks remain."""
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation" , model=model_id )
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ] , )
| 281 | 1 |
def lowerCAmelCase_ ( _snake_case : int = 50 ) -> int:
    """Count the ways a row of the given length can be filled with unit black
    squares and coloured tiles of length 2, 3 or 4 (Project Euler style).

    Bug fix vs. previous revision: the table was assigned to a throwaway name
    while the code read ``ways_number``, raising NameError.
    """
    length = _snake_case
    # ways_number[k] counts the fillings of a row of length k; a row of all
    # black squares is always one valid filling, hence the initial 1s.
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


# Backward-compatible alias so the CLI line below can call ``solution``.
solution = lowerCAmelCase_

if __name__ == "__main__":
    print(F"{solution() = }")
| 281 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
snake_case : List[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
snake_case : Any = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
snake_case : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """ROUGE metric: thin wrapper around Google Research's ``rouge_score``.

    NOTE(review): both methods share one name, so the second definition shadows
    the first (originals: ``_info`` / ``_compute``); names kept for
    backward compatibility.
    """

    def SCREAMING_SNAKE_CASE ( self ):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ] , )

    def SCREAMING_SNAKE_CASE ( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        """Score each prediction against its reference.

        Bug fix vs. previous revision: the signature declared five parameters
        all named ``_a`` (a SyntaxError) and every local binding was lost.

        Returns aggregated scores when ``use_aggregator`` is True, otherwise a
        dict mapping each rouge type to the per-pair score list.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 281 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : str = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _snake_case ( snake_case ):
    """Configuration for PEGASUS models (mirrors transformers' ``PegasusConfig``).

    Stores encoder/decoder depths, widths, attention heads, dropouts and the
    special-token ids used by generation.
    """

    # NOTE(review): the three class attributes share one name, so only the last
    # assignment survives (originals: model_type, keys_to_ignore_at_inference,
    # attribute_map); names kept for backward compatibility.
    UpperCamelCase__ = 'pegasus'
    UpperCamelCase__ = ['past_key_values']
    UpperCamelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self ,
        vocab_size=50_265 ,
        max_position_embeddings=1_024 ,
        encoder_layers=12 ,
        encoder_ffn_dim=4_096 ,
        encoder_attention_heads=16 ,
        decoder_layers=12 ,
        decoder_ffn_dim=4_096 ,
        decoder_attention_heads=16 ,
        encoder_layerdrop=0.0 ,
        decoder_layerdrop=0.0 ,
        use_cache=True ,
        is_encoder_decoder=True ,
        activation_function="gelu" ,
        d_model=1_024 ,
        dropout=0.1 ,
        attention_dropout=0.0 ,
        activation_dropout=0.0 ,
        init_std=0.02 ,
        decoder_start_token_id=0 ,
        scale_embedding=False ,
        pad_token_id=0 ,
        eos_token_id=1 ,
        forced_eos_token_id=1 ,
        **kwargs ,
    ):
        # Bug fix vs. previous revision: every parameter was named ``_a``
        # (duplicate argument names are a SyntaxError); the distinct names are
        # restored from the attribute assignments below and their defaults.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )

    # NOTE(review): the two properties share one name, so only the second
    # binding survives (originals: num_attention_heads / hidden_size).
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        return self.encoder_attention_heads

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        return self.d_model
| 281 |
# 64-character alphabet of standard Base64 (RFC 4648 §4); the encode/decode
# helpers below index into it. Bug fix vs. previous revision: the constant was
# only bound to ``snake_case`` while the functions read ``B64_CHARSET``.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
snake_case : str = B64_CHARSET  # kept for backward compatibility
def lowerCAmelCase_ ( _snake_case : bytes ) -> bytes:
    """Encode a bytes-like object to Base64, mirroring ``base64.b64encode``.

    Raises TypeError when the argument is not ``bytes``.

    Bug fix vs. previous revision: the intermediate names (``data``,
    ``binary_stream``, ``padding``) were never bound, so the function raised
    NameError on every call.
    """
    if not isinstance(_snake_case , bytes ):
        msg = F'''a bytes-like object is required, not \'{_snake_case.__class__.__name__}\''''
        raise TypeError(msg )
    # Local copy of the alphabet keeps this function self-contained.
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    # One long string of bits, 8 per input byte.
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in _snake_case )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The '=' padding that will be appended to the output: one '=' per two
        # missing bits of the final 6-bit group.
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Pad the bitstream with zeros so its length is a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Map every 6-bit group to its Base64 character.
    return (
        "".join(
            charset[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowerCAmelCase_ ( _snake_case : str ) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to bytes, mirroring
    ``base64.b64decode``.

    Raises TypeError for non-str/bytes input and ValueError for bytes that are
    not ASCII; asserts on invalid Base64 characters or bad padding.

    Bug fix vs. previous revision: the intermediate names (``padding``,
    ``binary_stream``, the decoded byte list) were never bound, so the function
    raised NameError on every call.
    """
    if not isinstance(_snake_case , str ) and not isinstance(_snake_case , bytes ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            F'''not \'{_snake_case.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    encoded_data = _snake_case
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    # Local copy of the alphabet keeps this function self-contained.
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in charset for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in charset for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Strip the '=' padding, expand to bits, then drop the filler zero bits
        # (2 bits per padding character).
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(charset.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(charset.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 281 | 1 |
class _snake_case :
def __init__( self ):
__magic_name__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
__magic_name__ : Any = False
def SCREAMING_SNAKE_CASE ( self , _a ):
for word in words:
self.insert(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : List[str] = self
for char in word:
if char not in curr.nodes:
__magic_name__ : str = TrieNode()
__magic_name__ : Optional[Any] = curr.nodes[char]
__magic_name__ : List[str] = True
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = self
for char in word:
if char not in curr.nodes:
return False
__magic_name__ : Optional[int] = curr.nodes[char]
return curr.is_leaf
def SCREAMING_SNAKE_CASE ( self , _a ):
def _delete(_a , _a , _a ) -> bool:
if index == len(_a ):
# If word does not exist
if not curr.is_leaf:
return False
__magic_name__ : Dict = False
return len(curr.nodes ) == 0
__magic_name__ : Optional[int] = word[index]
__magic_name__ : Tuple = curr.nodes.get(_a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
__magic_name__ : Union[str, Any] = _delete(_a , _a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _a , 0 )
def lowerCAmelCase_ ( node , word: str ) -> None:
    """Depth-first print of every word stored beneath *node*, each followed by
    a space; *word* is the prefix accumulated so far.

    Bug fix vs. previous revision: both parameters were named ``_snake_case``
    (a SyntaxError) and recursion went through the undefined name
    ``print_words``. NOTE(review): the recursion uses this function's module
    name, which later same-named definitions in this file would shadow.
    """
    if node.is_leaf:
        print(word , end=" " )
    for key, value in node.nodes.items():
        lowerCAmelCase_(value , word + key )
def lowerCAmelCase_ ( ) -> bool:
    """Exercise insert/find/delete on the trie and return True on success.

    Bug fix vs. previous revision: ``words`` and ``root`` were never bound
    (their assignments went to a throwaway name), so every statement below
    raised NameError.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("banana" )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    assert root.find("apple" )
    assert root.find("all" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True
def lowerCAmelCase_ ( msg: str , passes: bool ) -> None:
    """Print a one-line pass/fail report for the named check.

    Bug fix vs. previous revision: both parameters were named ``_snake_case``,
    which is a SyntaxError in Python.
    """
    print(str(msg ) , "works!" if passes else "doesn't work :(" )
def lowerCAmelCase_ ( ) -> None:
    """Assert that the trie test-suite passes."""
    # NOTE(review): ``test_trie`` is not defined in this module (the original
    # helper names were all flattened to ``lowerCAmelCase_``), so this raises
    # NameError at runtime — TODO: restore the intended helper names.
    assert test_trie()
def lowerCAmelCase_ ( ) -> None:
    """Print the outcome of the trie functionality test."""
    # NOTE(review): ``print_results`` and ``test_trie`` are likewise undefined
    # here for the same reason.
    print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
    # NOTE(review): ``main`` is undefined as well; the two entry points above
    # shadow one another because they share a single name.
    main()
| 281 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _snake_case ( unittest.TestCase ):
    """Builds a RobertaPreLayerNorm config plus dummy input tensors for the
    Flax model tests below.

    NOTE(review): every constructor parameter is named ``_a`` — duplicate
    argument names are a SyntaxError, so this class cannot currently be
    imported; the intended distinct names (batch_size=13, seq_length=7, ...)
    can be read off the attribute assignments, whose targets were likewise
    flattened to ``__magic_name__`` so no attribute is ever set. TODO: restore
    the original parameter and attribute names.
    """
    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
        __magic_name__ : List[Any] = parent
        __magic_name__ : Optional[Any] = batch_size
        __magic_name__ : Dict = seq_length
        __magic_name__ : Union[str, Any] = is_training
        __magic_name__ : Optional[Any] = use_attention_mask
        __magic_name__ : Optional[Any] = use_token_type_ids
        __magic_name__ : int = use_labels
        __magic_name__ : List[Any] = vocab_size
        __magic_name__ : Union[str, Any] = hidden_size
        __magic_name__ : Optional[Any] = num_hidden_layers
        __magic_name__ : int = num_attention_heads
        __magic_name__ : Any = intermediate_size
        __magic_name__ : List[Any] = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[Any] = max_position_embeddings
        __magic_name__ : Tuple = type_vocab_size
        __magic_name__ : List[str] = type_sequence_label_size
        __magic_name__ : Dict = initializer_range
        __magic_name__ : List[Any] = num_choices
    # NOTE(review): the three methods below share one name, so later
    # definitions shadow earlier ones (originals were presumably
    # prepare_config_and_inputs / prepare_config_and_inputs_for_common /
    # prepare_config_and_inputs_for_decoder — TODO confirm).
    def SCREAMING_SNAKE_CASE ( self ):
        # Builds input_ids, optional attention mask / token type ids, and a config.
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : List[Any] = None
        if self.use_attention_mask:
            __magic_name__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ : str = None
        if self.use_token_type_ids:
            __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ : List[str] = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def SCREAMING_SNAKE_CASE ( self ):
        # Packs the prepared inputs into the dict shape the common tests expect.
        __magic_name__ : int = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = config_and_inputs
        __magic_name__ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE ( self ):
        # Decoder variant: adds encoder hidden states and an encoder attention mask.
        __magic_name__ : Optional[int] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = config_and_inputs
        __magic_name__ : Tuple = True
        __magic_name__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _snake_case ( snake_case , unittest.TestCase ):
    """Common Flax model tests for all RobertaPreLayerNorm head variants.

    NOTE(review): the two class attributes share one name (originals were
    presumably test_head_masking-style flag / all_model_classes), assignment
    targets inside methods were flattened to ``__magic_name__``, and ``_a`` is
    undefined in the slow test — TODO: restore the original bindings.
    """
    UpperCamelCase__ = True
    UpperCamelCase__ = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def SCREAMING_SNAKE_CASE ( self ):
        # NOTE(review): ``FlaxRobertaPreLayerNormModelTester`` is not defined in
        # this module (the tester class above is named ``_snake_case``), and the
        # result is never stored on ``self`` — TODO confirm intended setUp.
        __magic_name__ : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Smoke-test loading each pretrained model class and running one forward pass.
        for model_class_name in self.all_model_classes:
            __magic_name__ : Optional[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
            __magic_name__ : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration tests comparing real model outputs against recorded
    logits/hidden-state slices.

    NOTE(review): assignment targets were flattened to ``__magic_name__`` so
    ``model``, ``input_ids`` and ``output`` are unbound, ``_a`` is undefined,
    and dtype names were garbled (``jnp.intaa`` / ``np.floataa`` — presumably
    int32 / float32) — TODO: restore the original bindings.
    """
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Masked-LM head: check output shape and a 3x3 logits slice.
        __magic_name__ : Dict = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Union[str, Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : List[str] = model(_a )[0]
        __magic_name__ : str = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , _a )
        # compare the actual values for a slice.
        __magic_name__ : List[str] = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Base model: check a 3x3 slice of the last hidden state.
        __magic_name__ : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Tuple = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : Tuple = model(_a )[0]
        # compare the actual values for a slice.
        __magic_name__ : Dict = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 281 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    """Fast pipeline tests for ``IFImgaImgSuperResolutionPipeline``.

    NOTE(review): the first two base classes share the name ``snake_case``
    (duplicate bases raise TypeError at class creation); the originals were the
    IF and common pipeline tester mixins — TODO: restore their names.
    """

    # NOTE(review): the four class attributes share one name, so only the last
    # assignment survives (originals: pipeline_class, params, batch_params,
    # required_optional_params); kept for backward compatibility.
    UpperCamelCase__ = IFImgaImgSuperResolutionPipeline
    UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'latents'}

    def SCREAMING_SNAKE_CASE ( self ):
        """Reuse the shared dummy super-resolution components."""
        return self._get_superresolution_dummy_components()

    def SCREAMING_SNAKE_CASE ( self , device , seed=0 ):
        """Build deterministic dummy inputs for the pipeline on *device*.

        Bug fix vs. previous revision: both parameters were named ``_a`` (a
        SyntaxError) and ``generator``/``image``/``original_image``/``inputs``
        were never bound.
        """
        if str(device ).startswith("mps" ):
            # MPS does not support device-local torch.Generator objects.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def SCREAMING_SNAKE_CASE ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def SCREAMING_SNAKE_CASE ( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def SCREAMING_SNAKE_CASE ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def SCREAMING_SNAKE_CASE ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def SCREAMING_SNAKE_CASE ( self ):
        self._test_save_load_local()

    def SCREAMING_SNAKE_CASE ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 281 |
def lowerCAmelCase_ ( _snake_case : list[list[int | float]] ) -> int:
'''simple docstring'''
__magic_name__ : Any = len(_snake_case )
__magic_name__ : Optional[Any] = len(matrix[0] )
__magic_name__ : Union[str, Any] = min(_snake_case , _snake_case )
for row in range(_snake_case ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _snake_case ):
__magic_name__ : Optional[Any] = matrix[col][row] / matrix[row][row]
for i in range(_snake_case , _snake_case ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__magic_name__ : str = True
for i in range(row + 1 , _snake_case ):
if matrix[i][row] != 0:
__magic_name__ , __magic_name__ : List[str] = matrix[i], matrix[row]
__magic_name__ : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(_snake_case ):
__magic_name__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 281 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
snake_case : List[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
snake_case : Any = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
snake_case : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """ROUGE metric: thin wrapper around Google Research's ``rouge_score``.

    NOTE(review): both methods share one name, so the second definition shadows
    the first (originals: ``_info`` / ``_compute``); names kept for
    backward compatibility.
    """

    def SCREAMING_SNAKE_CASE ( self ):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ] , )

    def SCREAMING_SNAKE_CASE ( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ):
        """Score each prediction against its reference.

        Bug fix vs. previous revision: the signature declared five parameters
        all named ``_a`` (a SyntaxError) and every local binding was lost.

        Returns aggregated scores when ``use_aggregator`` is True, otherwise a
        dict mapping each rouge type to the per-pair score list.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 281 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
snake_case : Dict = re.compile(R"\b(a|an|the)\b", re.UNICODE)
snake_case : Optional[int] = None
def lowerCAmelCase_ ( ):
    """Parse command-line options for the SQuAD 2.0 evaluation script.

    Bug fixes vs. previous revision: the parser was never bound to ``parser``,
    and ``type=float`` / ``default=None`` had been replaced by an undefined
    name.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    # With no arguments, print usage and exit non-zero instead of failing later.
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to True if it has at least one gold answer text."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Return whitespace tokens of the normalized answer; [] for empty/None."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Return the token-level F1 score between a gold and a predicted answer."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute per-question exact-match and F1 scores (no thresholding).

    Returns two dicts keyed by question id.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Overwrite each score with the no-answer outcome when the model's
    no-answer probability exceeds the threshold."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicting "no answer": score is 1 only if the question truly has none.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into percentage metrics.

    When qid_list is given, only those questions are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from new_eval into main_eval under a "<prefix>_<key>" key."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a step-style precision-recall curve to out_image.

    NOTE: plt is bound in the __main__ block (matplotlib with Agg backend).
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision by sweeping the no-answer probability threshold.

    Optionally saves the PR curve to out_image. Returns {"ap": <percentage>}.
    """
    qid_list = sorted(scores, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute average precision for exact, F1 and oracle scores; save PR curves
    under out_image_dir and merge the results into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        # No answerable questions: precision-recall analysis is undefined.
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    # Oracle: score 1 exactly when the question truly has an answer.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of the dataset).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Sweep the no-answer probability threshold and return the best achievable
    score (as a percentage) together with the threshold that attains it."""
    # Start from predicting "no answer" for everything.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Run the full SQuAD 2.0 evaluation driven by the OPTS command-line options."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without a no-answer probability file, never predict "no answer".
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily and use the non-interactive Agg backend,
        # since plots are only written to files.
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 281 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a Dataset loaded from the parquet fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading should only grow Arrow memory when keep_in_memory is requested."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features", [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ], )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """Explicitly requested features should override the inferred schema."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split name should be carried over to the loaded dataset."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader should accept both a single path and a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict loaded from the parquet fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """DatasetDict reads should only grow Arrow memory when keep_in_memory is set."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features", [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ], )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """Explicit features should override the inferred schema for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Each split passed to the reader should come back under its own key."""
    if split:
        path = {split: parquet_path}
    else:
        # No split requested: load the default train/test mapping.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to parquet and reading it back must round-trip the table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Feature types (e.g. Image) must survive a parquet write/read round-trip."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected", [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ], )
def test_get_writer_batch_size(feature, expected):
    """Media-bearing features should select the matching parquet row-group size."""
    assert get_writer_batch_size(feature) == expected
| 281 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece word-boundary marker, referenced by the tokenization tests below.
SPIECE_UNDERLINE = "▁"

# Path to the small sentencepiece fixture model used by setUp().
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Slow/fast tokenizer pair exercised by TokenizerTesterMixin.
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ], )

    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510", )
| 281 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Build a small LEDConfig plus matching encoder/decoder inputs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark only the final (EOS) token for global attention.
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached (past_key_values) decoding matches a full forward pass."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Assemble the keyword arguments for a TFLED forward pass.

    Any mask not supplied is derived from the inputs: positions equal to
    ``config.pad_token_id`` are masked out of ``attention_mask`` and
    ``decoder_attention_mask`` (the first decoder position is always kept,
    since it is the decoder-start token), and the head masks default to
    all-ones (no heads pruned).

    Fixes relative to the mangled original: the definition is renamed to match
    its call sites (`prepare_led_inputs_dict`), the duplicated `_snake_case`
    parameters (a SyntaxError) get their intended names, assignments no longer
    bind the dead name `__magic_name__`, and the nonexistent dtype `tf.inta`
    is replaced with `tf.int8`.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # never mask the decoder-start position
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    # NOTE(review): file-wide mangling — the base-class names (`snake_case`),
    # all six attribute names (`UpperCamelCase__`, so only the last assignment
    # per name survives), all method names (`SCREAMING_SNAKE_CASE`) and every
    # assignment target (`__magic_name__`) are placeholders; right-hand sides
    # still read the originally intended variables. Structure matches the
    # upstream TFLEDModelTest (model lists, pipeline mapping, common-test
    # overrides). Code left byte-identical; comments only.
    UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    UpperCamelCase__ = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase__ = True
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: build the shared model tester and a ConfigTester for the config class.
        __magic_name__ : Dict = TFLEDModelTester(self )
        __magic_name__ : List[Any] = ConfigTester(self , config_class=_a )
    def SCREAMING_SNAKE_CASE ( self ):
        # test_config: run the common configuration checks.
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self ):
        # test_decoder_model_past_large_inputs: delegate to the model tester.
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )
    def SCREAMING_SNAKE_CASE ( self ):
        # Attention-outputs test: mark the first `num_global_attn_indices`
        # positions as global attention, then check shapes/counts of the
        # returned (global) attention tensors under several config/kwarg
        # combinations of output_attentions / output_hidden_states.
        __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : List[str] = tf.zeros_like(inputs_dict["attention_mask"] )
        __magic_name__ : Optional[Any] = 2
        __magic_name__ : Tuple = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        __magic_name__ : Any = True
        __magic_name__ : str = self.model_tester.seq_length
        __magic_name__ : Dict = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(_a ):
            # one attention tensor per hidden layer, shaped [heads, seq, seq]
            __magic_name__ : str = outputs.decoder_attentions
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(_a ):
            # local attentions: [heads, seq, seq]; global attentions:
            # [heads, encoder_seq, num_global_attn_indices]
            __magic_name__ : Any = [t.numpy() for t in outputs.encoder_attentions]
            __magic_name__ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = False
            __magic_name__ : Tuple = False
            __magic_name__ : Optional[int] = model_class(_a )
            __magic_name__ : str = model(self._prepare_for_class(_a , _a ) )
            __magic_name__ : Any = len(_a )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
            if self.is_encoder_decoder:
                __magic_name__ : Tuple = model_class(_a )
                __magic_name__ : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
                self.assertEqual(config.output_hidden_states , _a )
                check_decoder_attentions_output(_a )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __magic_name__ : Dict = True
            __magic_name__ : str = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
            # Check attention is always last and order is fine
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
            self.assertEqual(model.config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def SCREAMING_SNAKE_CASE ( self ):
        # test_saved_model_creation (skipped): tracing breaks on symbolic conditionals.
        pass
    def SCREAMING_SNAKE_CASE ( self ):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(_snake_case):
    """Return `_snake_case` (a nested list of token ids) as a ``tf.int32`` constant.

    Fixes relative to the mangled original: renamed to `_long_tensor` to match
    the call sites in the integration tests below, and the nonexistent dtype
    ``tf.intaa`` replaced with ``tf.int32``.
    """
    return tf.constant(_snake_case, dtype=tf.int32)
snake_case : Optional[int] = 1E-4
@slow
@require_tf
class _snake_case ( unittest.TestCase ):
    # NOTE(review): mangled integration tests for the pretrained
    # "allenai/led-base-16384" checkpoint. Assignment targets are the dead name
    # `__magic_name__` while subsequent lines read the intended variables
    # (`model`, `output`, ...); code left byte-identical, comments only.
    def SCREAMING_SNAKE_CASE ( self ):
        # Base-model forward: hidden states should have shape (1, 1024, 768) and
        # the top-left 3x3 slice should match the hard-coded reference values.
        __magic_name__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        __magic_name__ : Optional[int] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Any = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : List[Any] = model(**_a )[0]
        __magic_name__ : List[str] = (1, 1_024, 768)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : int = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 )
    def SCREAMING_SNAKE_CASE ( self ):
        # LM-head forward: logits should have shape (1, 1024, vocab_size) and the
        # top-left 3x3 slice should match the hard-coded reference values.
        __magic_name__ : Tuple = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        __magic_name__ : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Optional[Any] = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : Union[str, Any] = model(**_a )[0]
        __magic_name__ : Optional[int] = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : str = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
| 281 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the mangling collapsed all module-level constant names to
# `snake_case`, so each assignment below clobbers the previous one. The class
# below reads the upstream names (logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# SPIECE_UNDERLINE), which these literals evidently correspond to, in order.
snake_case : int = logging.get_logger(__name__)
# vocab-file name expected on disk / the Hub
snake_case : List[str] = {"vocab_file": "spiece.model"}
# checkpoint name -> hosted SentencePiece model URL
snake_case : List[str] = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}
# checkpoint name -> maximum positional-embedding length
snake_case : Tuple = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
# SentencePiece word-boundary marker (U+2581)
snake_case : List[str] = "▁"
class _snake_case ( snake_case ):
    # NOTE(review): machine-mangled copy of the ALBERT SentencePiece tokenizer.
    # Class/base/attribute/method names are placeholders, every assignment
    # target was rewritten to `__magic_name__`, and most method parameters
    # collapsed to duplicate `_a` names (a SyntaxError) while the right-hand
    # sides still read the originally intended variables (`mask_token`,
    # `sp_model_kwargs`, `do_lower_case`, ...). Comments describe the intended
    # behaviour; no code bytes were changed.
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __magic_name__ : str = (
            AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
            if isinstance(_a , _a )
            else mask_token
        )
        __magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        __magic_name__ : Dict = do_lower_case
        __magic_name__ : Tuple = remove_space
        __magic_name__ : Union[str, Any] = keep_accents
        __magic_name__ : Tuple = vocab_file
        # Load the SentencePiece model from the vocab file.
        __magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # vocab_size: number of pieces known to the SentencePiece model.
        return len(self.sp_model )
    def SCREAMING_SNAKE_CASE ( self ):
        # get_vocab: piece -> id mapping, including post-hoc added tokens.
        __magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        # The SentencePieceProcessor is not picklable; drop it from the pickled
        # state and re-load it in __setstate__.
        __magic_name__ : List[str] = self.__dict__.copy()
        __magic_name__ : Any = None
        return state
    def __setstate__( self , _a ):
        __magic_name__ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __magic_name__ : str = {}
        __magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # preprocess_text: whitespace normalisation, backtick-quote unification,
        # optional accent stripping (NFKD + drop combining marks), optional
        # lower-casing.
        if self.remove_space:
            __magic_name__ : List[Any] = " ".join(inputs.strip().split() )
        else:
            __magic_name__ : str = inputs
        __magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            __magic_name__ : str = unicodedata.normalize("NFKD" , _a )
            __magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
        if self.do_lower_case:
            __magic_name__ : int = outputs.lower()
        return outputs
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _tokenize: SentencePiece-encode the preprocessed text, splitting a
        # trailing comma off digit-final pieces (e.g. "9," -> "9" + ",").
        __magic_name__ : Optional[Any] = self.preprocess_text(_a )
        __magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
        __magic_name__ : Any = []
        for piece in pieces:
            if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                __magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        __magic_name__ : List[str] = cur_pieces[1:]
                    else:
                        __magic_name__ : Optional[int] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(_a )
            else:
                new_pieces.append(_a )
        return new_pieces
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _convert_token_to_id via the SentencePiece vocabulary.
        return self.sp_model.PieceToId(_a )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _convert_id_to_token via the SentencePiece vocabulary.
        return self.sp_model.IdToPiece(_a )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # convert_tokens_to_string: decode runs of ordinary sub-tokens with the
        # SentencePiece model while passing special tokens through verbatim.
        # NOTE(review): upstream decodes `current_sub_tokens` at the two
        # `decode(...)` calls below; this copy passes `_a` (the method
        # parameter) — looks mangled, confirm against the original tokenizer.
        __magic_name__ : Any = []
        __magic_name__ : Union[str, Any] = ""
        __magic_name__ : int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                __magic_name__ : List[Any] = True
                __magic_name__ : Optional[int] = []
            else:
                current_sub_tokens.append(_a )
                __magic_name__ : Optional[Any] = False
        out_string += self.sp_model.decode(_a )
        return out_string.strip()
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # build_inputs_with_special_tokens:
        #   single sequence: [CLS] X [SEP]
        #   pair:            [CLS] A [SEP] B [SEP]
        __magic_name__ : List[str] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        # get_special_tokens_mask: 1 marks a special token, 0 a sequence token.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is not None:
            return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1]
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # create_token_type_ids_from_sequences: 0s for the first segment
        # (including [CLS]/[SEP]), 1s for the second segment.
        __magic_name__ : Optional[int] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # save_vocabulary: copy the on-disk SentencePiece model into
        # `save_directory` (or serialise it if no file exists) and return the
        # written path as a 1-tuple.
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : List[str] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , "wb" ) as fi:
                __magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
return (out_vocab_file,)
| 281 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) each sequence to `sequence_length` and return nested lists.

    When `padding_value` is a tuple (e.g. ``(-1, -1)`` for span pairs), the
    output is 3-D with trailing dimension 2; otherwise it is 2-D.
    `padding_side` == "right" places data first and padding after it; any other
    value ("left") places padding first and data at the end. Sequences longer
    than `sequence_length` are truncated.

    Fixes relative to the mangled original: renamed to match the call sites
    (`padding_tensor`), parameters given their intended names (the duplicated
    `_snake_case` signature was a SyntaxError), and the per-row slice
    assignments restored — the original loop only rebound a throwaway name and
    never wrote into the output array.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = np.asarray(tensor)[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(trimmed)] = trimmed
        else:
            # left padding: data occupies the trailing positions
            out_tensor[i, sequence_length - len(trimmed) :] = trimmed
    return out_tensor.tolist()
def lowerCAmelCase_(_snake_case: str) -> bool:
    """Return True if the single character `_snake_case` counts as punctuation.

    The ASCII ranges ``!-/``, ``:-@``, ``[-`` ` `` and ``{-~`` are treated as
    punctuation even though some of them (e.g. "$", "^") are not in the
    Unicode "P*" categories; any character whose Unicode category starts with
    "P" also counts.

    Fixes relative to the mangled original: assignments no longer bind the
    dead name `__magic_name__` (which made the later reads of `cp` and `cat`
    a NameError), and the return annotation is corrected to bool.
    """
    cp = ord(_snake_case)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(_snake_case)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class _snake_case ( snake_case ):
    # NOTE(review): mangled copy of a LUKE token-classification data collator.
    # All six field names collapsed to `UpperCamelCase__`, so only the last
    # assignment survives as a class attribute; the method body reads the
    # upstream names (tokenizer, padding, max_length, pad_to_multiple_of,
    # label_pad_token_id, return_tensors). The bare `42` presumably stands in
    # for an annotation-only field before mangling. No annotations are added
    # here on purpose: annotating these names would turn them into @dataclass
    # fields and change behaviour.
    UpperCamelCase__ = 42
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    UpperCamelCase__ = None
    UpperCamelCase__ = -100
    UpperCamelCase__ = "pt"
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Collate a batch: tokenizer-pad the token features, then pad labels,
        # "ner_tags" and "original_entity_spans" to the batch entity length and
        # convert everything to torch tensors.
        import torch
        __magic_name__ : List[str] = "label" if "label" in features[0].keys() else "labels"
        __magic_name__ : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        __magic_name__ : Optional[int] = self.tokenizer.pad(
            _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        # pad labels to the padded entity-sequence length, honouring padding side
        __magic_name__ : Dict = torch.tensor(batch["entity_ids"] ).shape[1]
        __magic_name__ : List[Any] = self.tokenizer.padding_side
        if padding_side == "right":
            __magic_name__ : str = [
                list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
            ]
        else:
            __magic_name__ : int = [
                [self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
            ]
        __magic_name__ : Dict = [feature["ner_tags"] for feature in features]
        __magic_name__ : List[Any] = padding_tensor(_a , -1 , _a , _a )
        __magic_name__ : Any = [feature["original_entity_spans"] for feature in features]
        __magic_name__ : Any = padding_tensor(_a , (-1, -1) , _a , _a )
        # NOTE(review): `torch.intaa` is not a real dtype — presumably
        # `torch.int64` before mangling; confirm against the upstream script.
        __magic_name__ : List[Any] = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 281 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _snake_case ( snake_case , unittest.TestCase ):
    # NOTE(review): mangled copy of the BARTpho tokenizer test. `_a` in the
    # constructor calls below presumably refers to the SentencePiece fixture
    # path defined above — the mangling replaced the original constant name.
    UpperCamelCase__ = BartphoTokenizer
    UpperCamelCase__ = False
    UpperCamelCase__ = True
    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: write a tiny monolingual vocab file next to the SP fixture and
        # save a tokenizer built from both into tmpdirname.
        super().setUp()
        __magic_name__ : Union[str, Any] = ["▁This", "▁is", "▁a", "▁t", "est"]
        __magic_name__ : int = dict(zip(_a , range(len(_a ) ) ) )
        __magic_name__ : int = {"unk_token": "<unk>"}
        __magic_name__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''' )
        __magic_name__ : Union[str, Any] = BartphoTokenizer(_a , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def SCREAMING_SNAKE_CASE ( self , **_a ):
        # get_tokenizer: reload the tokenizer saved in setUp.
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **_a )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # get_input_output_texts: expected round-trip, OOV pieces map to <unk>.
        __magic_name__ : Union[str, Any] = "This is a là test"
        __magic_name__ : Dict = "This is a<unk><unk> test"
        return input_text, output_text
    def SCREAMING_SNAKE_CASE ( self ):
        # test_full_tokenizer: piece-level tokenisation and token->id conversion
        # (id 3 is the <unk> fallback for out-of-vocabulary pieces).
        __magic_name__ : List[Any] = BartphoTokenizer(_a , self.monolingual_vocab_file , **self.special_tokens_map )
        __magic_name__ : Optional[Any] = "This is a là test"
        __magic_name__ : str = "▁This ▁is ▁a ▁l à ▁t est".split()
        __magic_name__ : Any = tokenizer.tokenize(_a )
        self.assertListEqual(_a , _a )
        __magic_name__ : Tuple = tokens + [tokenizer.unk_token]
        __magic_name__ : str = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
| 281 |
import math
def fx(x: float, a: float) -> float:
    """Evaluate f(x) = x**2 - a, whose positive root is sqrt(a).

    Fixes relative to the mangled original: renamed to `fx` to match its call
    site in the Newton iteration below, and the duplicated `_snake_case`
    parameters (a SyntaxError; the body read `a`, which was never bound) are
    given their intended names.
    """
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """Evaluate f'(x) = 2*x, the derivative of f(x) = x**2 - a.

    Fixes relative to the mangled original: renamed to `fx_derivative` to
    match its call site below, and the parameter regains the name `x` that
    the body actually reads.
    """
    return 2 * x
def get_initial_point(a: float) -> float:
    """Return a starting guess >= sqrt(a) by repeated squaring from 2.0.

    Fixes relative to the mangled original: renamed to `get_initial_point`
    to match its call site below, the parameter regains the name `a` read by
    the loop condition, and the loop actually updates `start` (the original
    bound the squared value to the dead name `__magic_name__`, making the
    loop infinite for a > 2).
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def lowerCAmelCase_(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    """Approximate sqrt(a) via Newton–Raphson iteration on f(x) = x**2 - a.

    Raises ValueError ("math domain error") for negative input. Runs at most
    `max_iter` iterations, returning early once two successive estimates
    differ by less than `tolerance`.

    Fixes relative to the mangled original: the duplicated `_snake_case`
    parameters (a SyntaxError) get their intended names, and the loop
    variables `prev_value`/`value` are actually assigned instead of the dead
    name `__magic_name__`.
    """
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Script entry point: run doctests. The upstream version of this algorithm
    # carries doctest examples in its docstrings; none survive in this copy,
    # so this is currently a no-op kept for parity.
    from doctest import testmod
    testmod()
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case : str = logging.get_logger(__name__)
# NOTE(review): the mangling collapsed all module constant names to
# `snake_case`, so each assignment below clobbers the previous one. The class
# below reads the upstream names (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES), which
# these literals evidently correspond to, in order.
snake_case : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# checkpoint name -> hosted vocab / merges / serialized-tokenizer URLs
snake_case : Tuple = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}
# checkpoint name -> maximum model input length
snake_case : Dict = {
    "gpt2": 1_024,
    "gpt2-medium": 1_024,
    "gpt2-large": 1_024,
    "gpt2-xl": 1_024,
    "distilgpt2": 1_024,
}
class _snake_case ( snake_case ):
    # NOTE(review): mangled copy of the fast (Rust-backed) GPT-2 tokenizer.
    # Class/base/method names and `__magic_name__` assignment targets are
    # placeholders; right-hand sides still read the intended variables
    # (`kwargs`, `pre_tok_state`, `add_prefix_space`, ...). Code unchanged.
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = ['input_ids', 'attention_mask']
    UpperCamelCase__ = GPTaTokenizer
    def __init__( self , _a=None , _a=None , _a=None , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ):
        super().__init__(
            _a , _a , tokenizer_file=_a , unk_token=_a , bos_token=_a , eos_token=_a , add_prefix_space=_a , **_a , )
        __magic_name__ : Tuple = kwargs.pop("add_bos_token" , _a )
        # Rebuild the Rust pre-tokenizer if the requested `add_prefix_space`
        # differs from what the serialized tokenizer was created with.
        __magic_name__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , _a ) != add_prefix_space:
            __magic_name__ : Optional[int] = getattr(_a , pre_tok_state.pop("type" ) )
            __magic_name__ : Dict = add_prefix_space
            __magic_name__ : str = pre_tok_class(**_a )
        __magic_name__ : int = add_prefix_space
    def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
        # _batch_encode_plus: pre-tokenized input requires add_prefix_space=True.
        # NOTE(review): `assert` is stripped under `python -O`; the upstream
        # tokenizer uses the same pattern, so it is left unchanged here.
        __magic_name__ : str = kwargs.get("is_split_into_words" , _a )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*_a , **_a )
    def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
        # _encode_plus: same pre-tokenized-input guard as _batch_encode_plus.
        __magic_name__ : str = kwargs.get("is_split_into_words" , _a )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*_a , **_a )
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # save_vocabulary: delegate serialisation to the Rust tokenizer model.
        __magic_name__ : List[str] = self._tokenizer.model.save(_a , name=_a )
        return tuple(_a )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _build_conversation_input_ids: encode every turn followed by EOS and
        # keep only the trailing `model_max_length` ids.
        __magic_name__ : Any = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
        if len(_a ) > self.model_max_length:
            __magic_name__ : Any = input_ids[-self.model_max_length :]
        return input_ids
| 281 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _snake_case :
    # NOTE(review): mangled copy of the TF-LED model tester (class/method names
    # and `__magic_name__` assignment targets are placeholders; the methods'
    # later lines read the originally intended attribute/local names).
    # Upstream attribute names, in order: config_cls, config_updates,
    # hidden_act — the methods below read `self.config_cls` / `self.config_updates`.
    UpperCamelCase__ = LEDConfig
    UpperCamelCase__ = {}
    UpperCamelCase__ = 'gelu'
    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ):
        # Store the tester hyper-parameters (batch size, sequence length,
        # hidden sizes, dropout probabilities, special-token ids, attention
        # window). NOTE(review): all parameters collapsed to duplicate `_a`
        # names (a SyntaxError) and all targets to `__magic_name__`.
        __magic_name__ : int = parent
        __magic_name__ : Optional[int] = batch_size
        __magic_name__ : Tuple = seq_length
        __magic_name__ : List[Any] = is_training
        __magic_name__ : Dict = use_labels
        __magic_name__ : Optional[Any] = vocab_size
        __magic_name__ : int = hidden_size
        __magic_name__ : Optional[int] = num_hidden_layers
        __magic_name__ : Optional[int] = num_attention_heads
        __magic_name__ : Tuple = intermediate_size
        __magic_name__ : Any = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[str] = max_position_embeddings
        __magic_name__ : Any = eos_token_id
        __magic_name__ : str = pad_token_id
        __magic_name__ : int = bos_token_id
        __magic_name__ : Optional[int] = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        __magic_name__ : Tuple = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        __magic_name__ : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def SCREAMING_SNAKE_CASE ( self ):
        # prepare_config_and_inputs_for_common: random input ids ending in EOS,
        # an LEDConfig built from the tester hyper-parameters, the input dict
        # from prepare_led_inputs_dict, and a global-attention mask marking
        # only the last token. NOTE(review): `eos_token_ids=[2]` — upstream
        # LEDConfig takes a scalar `eos_token_id`; presumably mangled, confirm.
        __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __magic_name__ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __magic_name__ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : Dict = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        __magic_name__ : List[str] = prepare_led_inputs_dict(_a , _a , _a )
        __magic_name__ : Union[str, Any] = tf.concat(
            [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
        __magic_name__ : List[Any] = global_attention_mask
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        # check_decoder_model_past_large_inputs: verify that decoding with a
        # cached `past_key_values` matches re-running the full sequence.
        __magic_name__ : Dict = TFLEDModel(config=_a ).get_decoder()
        __magic_name__ : Optional[int] = inputs_dict["input_ids"]
        __magic_name__ : Union[str, Any] = input_ids[:1, :]
        __magic_name__ : str = inputs_dict["attention_mask"][:1, :]
        __magic_name__ : int = 1
        # first forward pass
        __magic_name__ : Tuple = model(_a , attention_mask=_a , use_cache=_a )
        __magic_name__ , __magic_name__ : str = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __magic_name__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        # NOTE(review): `tf.inta` is not a TensorFlow dtype — presumably tf.int8.
        __magic_name__ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __magic_name__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        __magic_name__ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __magic_name__ : List[str] = model(_a , attention_mask=_a )[0]
        __magic_name__ : Dict = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __magic_name__ : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __magic_name__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
        __magic_name__ : List[str] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Assemble the keyword arguments for a TFLED forward pass.

    Any mask not supplied is derived from the inputs: positions equal to
    ``config.pad_token_id`` are masked out of ``attention_mask`` and
    ``decoder_attention_mask`` (the first decoder position is always kept,
    since it is the decoder-start token), and the head masks default to
    all-ones (no heads pruned).

    Fixes relative to the mangled original: the definition is renamed to match
    its call sites (`prepare_led_inputs_dict`), the duplicated `_snake_case`
    parameters (a SyntaxError) get their intended names, assignments no longer
    bind the dead name `__magic_name__`, and the nonexistent dtype `tf.inta`
    is replaced with `tf.int8`.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # never mask the decoder-start position
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    # NOTE(review): duplicate of the TFLEDModelTest class that appears earlier
    # in this file; the class body continues beyond this chunk. Same mangling
    # pattern: attribute names collapsed to `UpperCamelCase__`, method names to
    # `SCREAMING_SNAKE_CASE`, assignment targets to `__magic_name__`.
    UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    UpperCamelCase__ = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase__ = True
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: build the shared model tester and a ConfigTester.
        __magic_name__ : Dict = TFLEDModelTester(self )
        __magic_name__ : List[Any] = ConfigTester(self , config_class=_a )
    def SCREAMING_SNAKE_CASE ( self ):
        # test_config: run the common LEDConfig checks.
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self ):
        # test_decoder_model_past_large_inputs: delegate to the model tester.
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[str] = tf.zeros_like(inputs_dict["attention_mask"] )
__magic_name__ : Optional[Any] = 2
__magic_name__ : Tuple = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
__magic_name__ : Any = True
__magic_name__ : str = self.model_tester.seq_length
__magic_name__ : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a ):
__magic_name__ : str = outputs.decoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_a ):
__magic_name__ : Any = [t.numpy() for t in outputs.encoder_attentions]
__magic_name__ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[str] = False
__magic_name__ : Tuple = False
__magic_name__ : Optional[int] = model_class(_a )
__magic_name__ : str = model(self._prepare_for_class(_a , _a ) )
__magic_name__ : Any = len(_a )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
__magic_name__ : Tuple = model_class(_a )
__magic_name__ : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Dict = True
__magic_name__ : str = model_class(_a )
__magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
__magic_name__ : Union[str, Any] = True
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[str] = model_class(_a )
__magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
self.assertEqual(model.config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
# TODO: Head-masking not yet implement
pass
def lowerCAmelCase_ ( _snake_case : int ) -> "tf.Tensor":
    '''Wrap a (nested) list of token ids in an integer ``tf.constant``.

    Fix: ``tf.intaa`` is not a TensorFlow dtype (AttributeError at call
    time); restored to ``tf.int32``, the usual token-id dtype here — TODO
    confirm against upstream.
    '''
    return tf.constant(_snake_case , dtype=tf.int32 )
# Absolute tolerance used by the slow integration checks below
# (name mangled by the refactor; kept as-is).
snake_case : Optional[int] = 1E-4
@slow
@require_tf
class _snake_case ( unittest.TestCase ):
    """Slow integration checks against the pretrained ``allenai/led-base-16384``.

    Fix: locals were bound to ``__magic_name__`` while later lines read
    ``output`` etc. (NameError); the referenced names are restored.

    NOTE(review): both methods share the name ``SCREAMING_SNAKE_CASE``, so
    the second shadows the first — they presumably carried distinct
    ``test_*`` names originally. Names are kept to preserve the interface.
    """
    def SCREAMING_SNAKE_CASE ( self ):
        # Hidden-state check on the bare encoder/decoder stack (``.led``).
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 )
    def SCREAMING_SNAKE_CASE ( self ):
        # LM-head check: logits over the full vocabulary.
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 , rtol=1e-3 )
| 281 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( snake_case , snake_case ):
    """FiLM-conditioned T5-style decoder: maps continuous (spectrogram-like)
    inputs plus a diffusion-time embedding and encoder outputs back to an
    ``input_dims``-wide output sequence.

    Fixes vs. original: all ``__init__`` parameters were named ``_a`` (a
    SyntaxError) — restored from the config attributes the forward pass
    reads; submodules were bound to ``__magic_name__`` instead of the
    ``self.*`` names used below; the mask helper is named
    ``encoder_decoder_mask`` (its call site at ``self.encoder_decoder_mask``
    grounds the name) and the main method ``forward`` (instances are invoked
    via ``nn.Module.__call__``).
    """
    @register_to_config
    def __init__( self , input_dims = 128 , targets_length = 256 , max_decoder_noise_time = 20_00.0 , d_model = 768 , num_layers = 12 , num_heads = 12 , d_kv = 64 , d_ff = 2_048 , dropout_rate = 0.1 , ):
        super().__init__()
        # MLP turning the timestep embedding into the FiLM conditioning vector.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        # Position table is fixed, not trained.
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ):
        # Outer product of two 0/1 masks -> (batch, 1, q_len, k_len).
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class _snake_case ( nn.Module ):
    """One FiLM-conditioned T5 decoder block: conditional self-attention,
    cross-attention over the encoder outputs, and a FiLM-conditioned
    feed-forward, chained through ``self.layer``.

    Fixes vs. original: duplicate ``_a`` parameter names (SyntaxError) and
    ``self.layer`` never assigned (the appends below read it); the main
    method is restored to ``forward`` (instances are called as modules).
    """
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            # Convert the 0/1 cross-attention mask to additive form
            # (0 where attended, large negative where masked).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class _snake_case ( nn.Module ):
    """Self-attention sub-block with optional FiLM conditioning applied to
    the pre-norm hidden states; residual connection around attention+dropout.

    Fixes vs. original: duplicate ``_a`` parameter names (SyntaxError);
    submodules and locals restored to the names the body reads
    (``self.layer_norm``, ``self.FiLMLayer``, ``self.attention``,
    ``self.dropout``, ``hidden_states``); main method restored to ``forward``.
    ``out_bias``/``scale_qk`` presumably False as in the diffusers original —
    TODO confirm.
    """
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class _snake_case ( nn.Module ):
    """Cross-attention sub-block: pre-norm, attention over the encoder
    key/value states, residual add with dropout.

    Fixes vs. original: duplicate ``_a`` parameter names (SyntaxError);
    submodules/locals restored to the names the body reads; main method
    restored to ``forward``.
    """
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class _snake_case ( nn.Module ):
    """FiLM-conditioned feed-forward sub-block: pre-norm, optional FiLM,
    gated dense layer, residual add with dropout.

    Fixes vs. original: duplicate ``_a`` parameter names (SyntaxError);
    submodules/locals restored to the names the body reads
    (``self.DenseReluDense``, ``self.film``, ``self.layer_norm``,
    ``self.dropout``); main method restored to ``forward``.
    """
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class _snake_case ( nn.Module ):
    """T5-style gated-GELU dense block: two parallel input projections
    (gate and linear), elementwise product, dropout, output projection.

    Fixes vs. original: duplicate ``_a`` parameter names (SyntaxError); the
    two input projections were both named ``wi_a`` (the second shadowing the
    first) — restored to the T5 names ``wi_0``/``wi_1``; main method
    restored to ``forward``.
    """
    def __init__( self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        # gated activation: gelu(gate) * linear
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class _snake_case ( nn.Module ):
def __init__( self , _a , _a=1e-6 ):
super().__init__()
__magic_name__ : Tuple = nn.Parameter(torch.ones(_a ) )
__magic_name__ : Union[str, Any] = eps
def SCREAMING_SNAKE_CASE ( self , _a ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
__magic_name__ : str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_a )
__magic_name__ : List[str] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__magic_name__ : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _snake_case ( nn.Module ):
def SCREAMING_SNAKE_CASE ( self , _a ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(_a , 3.0 )) ))
class _snake_case ( nn.Module ):
def __init__( self , _a , _a ):
super().__init__()
__magic_name__ : List[Any] = nn.Linear(_a , out_features * 2 , bias=_a )
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : Union[str, Any] = self.scale_bias(_a )
__magic_name__ , __magic_name__ : Optional[Any] = torch.chunk(_a , 2 , -1 )
__magic_name__ : Any = x * (1 + scale) + shift
return x
| 281 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Emit INFO-level progress messages during conversion.
logging.set_verbosity_info()
# Module-level logger (name mangled by the refactor; kept as-is).
snake_case : Optional[Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( config , base_model=False ) -> List[str]:
    '''Build the (timm_key, hf_key) rename pairs for a ViT-hybrid checkpoint.

    When ``base_model`` is True the keys target a bare ViTHybridModel (no
    "vit." prefix, pooler instead of classifier); otherwise they target
    ViTHybridForImageClassification.

    Fixes vs. original: both parameters were named ``_snake_case`` (a
    SyntaxError) and the accumulator was bound to ``__magic_name__`` while
    every append read ``rename_keys``; names restored from the body.
    '''
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on
    return rename_keys
def lowerCAmelCase_ ( state_dict , config , base_model=False ) -> None:
    '''Split each timm fused qkv projection into separate q/k/v tensors, in place.

    timm stores the attention input projection as one (3*hidden, hidden)
    matrix plus one bias; the HF ViT layout expects distinct
    query/key/value weights under ``{prefix}encoder.layer.{i}.attention.attention``.

    Fixes vs. original: parameters shared the name ``_snake_case``
    (SyntaxError) and every slice was assigned to ``__magic_name__`` — i.e.
    the values were popped and then discarded. Destination keys restored to
    the standard HF ViT layout (NOTE(review): verify against a converted
    checkpoint).
    '''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( state_dict ) -> None:
    '''Drop the timm classification-head weights from ``state_dict`` in place.

    Fix vs. original: the key list was bound to ``__magic_name__`` while the
    loop read ``ignore_keys``, and ``pop`` was called with the mangled
    placeholder args; missing keys are tolerated via a ``None`` default.
    '''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCAmelCase_ ( dct , old , new ) -> None:
    '''Move ``dct[old]`` to ``dct[new]`` in place.

    Fix vs. original: the popped value and the new-key assignment were both
    bound to ``__magic_name__``, so the rename never happened.
    '''
    val = dct.pop(old )
    dct[new] = val
def lowerCAmelCase_ ( ) -> "Image.Image":
    '''Download the standard COCO cats image used to sanity-check conversions.

    Fix vs. original: the function takes no parameters, yet the body
    referenced ``_snake_case`` (NameError); the URL is now bound to a local
    and ``stream=True`` restored so ``.raw`` is readable.
    '''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( vit_name , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''Convert a timm ViT-hybrid checkpoint to the HF ViTHybrid format.

    Loads the timm model, renames/splits its weights into the HF layout,
    verifies preprocessing and logits agree, then optionally saves and/or
    pushes the model + processor.

    Fixes vs. original: the three parameters shared the name ``_snake_case``
    (SyntaxError) and ~20 locals were bound to ``__magic_name__`` while
    later lines read the original names (``config``, ``timm_model``,
    ``state_dict``, ``processor``, ...); the referenced names are restored.
    '''
    # define default ViT hybrid configuration (BiT backbone feeding the ViT)
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # label mapping for the classification head
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor mirroring the timm eval transform
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # Fix vs. original: the parser and the parsed args were bound to
    # ``snake_case`` while the following lines read ``parser``/``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    args = parser.parse_args()
    # NOTE(review): the conversion function above is currently named
    # ``lowerCAmelCase_`` by the refactor; ``convert_vit_checkpoint`` is the
    # name it originally carried — confirm both are restored together.
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 1 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=DummyObject ):
    """Placeholder object for a component that needs ``torch`` and
    ``torchsde``: every access raises a helpful ImportError instead of the
    package failing at import time.

    Fixes vs. original: ``*_a, **_a`` used the same name for the varargs and
    kwargs (SyntaxError); the metaclass read ``snake_case`` (an unrelated
    module-level name) although ``DummyObject`` is imported above for
    exactly this purpose. NOTE(review): the two identical classmethods share
    one name, so the second shadows the first — presumably ``from_config``
    and ``from_pretrained`` originally.
    """
    UpperCamelCase__ = ['torch', 'torchsde']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "torchsde"] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "torchsde"] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "torchsde"] )
| 281 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"

# Build
# Fix vs. original: every result was rebound to the single name
# ``snake_case`` while later statements read ``tokenizer``, ``config``,
# ``tiny_model``, ``outputs`` and ``mname_tiny`` (NameError); the names the
# script reads are restored.
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
# shrink every dimension to the minimum that still exercises the machinery
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
| 281 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-level constants for the XLNet tokenizer.
# Fix vs. original: every constant was bound to the single name
# ``snake_case`` (each assignment shadowing the last), while the tokenizer
# class below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES; those names are restored. The
# SEG_ID_* names follow the standard XLNet segment-id constants —
# NOTE(review): confirm against upstream.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = 'left'
def __init__( self , _a , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , _a = None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__magic_name__ : Union[str, Any] = 3
__magic_name__ : Any = do_lower_case
__magic_name__ : int = remove_space
__magic_name__ : Dict = keep_accents
__magic_name__ : Any = vocab_file
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : List[str] = None
return state
def __setstate__( self , _a ):
__magic_name__ : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__magic_name__ : Optional[int] = {}
__magic_name__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _a ):
if self.remove_space:
__magic_name__ : str = " ".join(inputs.strip().split() )
else:
__magic_name__ : Optional[Any] = inputs
__magic_name__ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__magic_name__ : Any = unicodedata.normalize("NFKD" , _a )
__magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__magic_name__ : str = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : str = self.preprocess_text(_a )
__magic_name__ : Tuple = self.sp_model.encode(_a , out_type=_a )
__magic_name__ : List[str] = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__magic_name__ : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__magic_name__ : Optional[int] = cur_pieces[1:]
else:
__magic_name__ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.IdToPiece(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = "".join(_a ).replace(_a , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _a , _a = False , _a = None , _a = True , **_a , ):
__magic_name__ : Optional[Any] = kwargs.pop("use_source_tokenizer" , _a )
__magic_name__ : str = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__magic_name__ : int = []
__magic_name__ : Union[str, Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
__magic_name__ : Optional[int] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__magic_name__ : Dict = "".join(_a )
__magic_name__ : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__magic_name__ : str = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Tuple = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Optional[int] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 281 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once at import time so script output is timestamped.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# The rest of this script logs through `logger` (e.g. logger.info(...)), so the
# module-level binding must carry that name.
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count correct multiple-choice predictions.

    Args:
        out: (batch, n_choices) score matrix.
        labels: (batch,) array of gold choice indices.
    Returns:
        Number of rows whose argmax matches the label.
    """
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Load a RocStories CSV.

    Returns a list of tuples (story, continuation_1, continuation_2, label)
    where label is shifted to be 0-based.
    """
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the header line
        for line in tqdm(f):
            # columns 1-4 are the story sentences, 5/6 the two continuations,
            # last column the 1-based gold continuation
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack encoded RocStories examples into padded tensors.

    Each example yields two candidate sequences:
        [start] story [delimiter] continuation_i [clf]
    Returns, per dataset, a tuple of torch tensors
    (input_ids, mc_token_ids, lm_labels, mc_labels).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 is the ignore index for the LM loss
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # index of the [clf] token, used by the multiple-choice head
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def lowerCAmelCase_() -> List[Any]:
    """Fine-tune and/or evaluate OpenAI GPT (double-heads) on RocStories.

    Command-line driven: parses args, seeds RNGs, builds tokenizer/model,
    encodes the datasets, optionally trains, saves the model, and evaluates.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1E-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25E-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed every RNG for reproducibility
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        # Recursively tokenize strings, pass ints through, map over containers.
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        # bias and LayerNorm parameters are excluded from weight decay
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # combined objective: weighted LM loss + multiple-choice loss
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # exponential moving average of the loss for the progress bar
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
    # The entry point is defined above as `lowerCAmelCase_`; the original
    # called an undefined `main()`.
    lowerCAmelCase_()
| 281 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Helper that holds image-processor settings and input shapes for DPT tests.

    Renamed from the garbled `_snake_case`: the test class below instantiates
    `DPTImageProcessingTester(self)` and calls `prepare_image_processor_dict()`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # defaults kept out of the signature to avoid shared mutable defaults
        size = size if size is not None else {"height": 18, "width": 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a DPTImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _snake_case ( snake_case , unittest.TestCase ):
    # Image-processor class under test; None when the vision stack is unavailable.
    UpperCamelCase__ = DPTImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: build the helper that supplies processor kwargs and input shapes.
        # NOTE(review): assignment target looks garbled (`__magic_name__`); later
        # code reads self.image_processor_tester — confirm the intended name.
        __magic_name__ : List[Any] = DPTImageProcessingTester(self )
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # Convenience accessor for the tester's image-processor kwargs dict.
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE ( self ):
        # The constructed processor exposes the expected configuration attributes.
        # NOTE(review): `_a` is unbound here; presumably the processor built on
        # the line above — confirm.
        __magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_a , "image_mean" ) )
        self.assertTrue(hasattr(_a , "image_std" ) )
        self.assertTrue(hasattr(_a , "do_normalize" ) )
        self.assertTrue(hasattr(_a , "do_resize" ) )
        self.assertTrue(hasattr(_a , "size" ) )
    def SCREAMING_SNAKE_CASE ( self ):
        # from_dict honors the defaults and allows overriding `size`.
        __magic_name__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        __magic_name__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def SCREAMING_SNAKE_CASE ( self ):
        # PIL inputs: single image and batch produce correctly-shaped pixel_values.
        # Initialize image_processing
        __magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __magic_name__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , Image.Image )
        # Test not batched input
        __magic_name__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        __magic_name__ : str = image_processing(_a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def SCREAMING_SNAKE_CASE ( self ):
        # numpy inputs: same shape checks as the PIL case above.
        # Initialize image_processing
        __magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __magic_name__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , np.ndarray )
        # Test not batched input
        __magic_name__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        __magic_name__ : List[Any] = image_processing(_a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def SCREAMING_SNAKE_CASE ( self ):
        # torch tensor inputs: same shape checks as the PIL case above.
        # Initialize image_processing
        __magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __magic_name__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , torch.Tensor )
        # Test not batched input
        __magic_name__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        __magic_name__ : Any = image_processing(_a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 281 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 1 |
def lowerCAmelCase_ ( _snake_case : list[list[int | float]] ) -> int:
'''simple docstring'''
__magic_name__ : Any = len(_snake_case )
__magic_name__ : Optional[Any] = len(matrix[0] )
__magic_name__ : Union[str, Any] = min(_snake_case , _snake_case )
for row in range(_snake_case ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _snake_case ):
__magic_name__ : Optional[Any] = matrix[col][row] / matrix[row][row]
for i in range(_snake_case , _snake_case ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__magic_name__ : str = True
for i in range(row + 1 , _snake_case ):
if matrix[i][row] != 0:
__magic_name__ , __magic_name__ : List[str] = matrix[i], matrix[row]
__magic_name__ : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(_snake_case ):
__magic_name__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
    import doctest
    # Run docstring examples in this module as a lightweight self-test.
    doctest.testmod()
| 281 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( _snake_case : List[Any] ) -> List[Any]:
    '''With the mockfs fixture active, both the test `mock` protocol and the
    built-in `bz2` protocol are present in fsspec's registry.'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Tuple:
    '''Without the mockfs fixture, `mock` is not registered while the
    built-in `bz2` protocol still is.'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Union[str, Any]:
    '''extract_path_from_uri strips the s3:// scheme and leaves local paths untouched.'''
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[Any]:
    '''is_remote_filesystem is True for the (mock) remote fs fixture and
    False for the local "file" filesystem.'''
    is_remote = is_remote_filesystem(_snake_case)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def lowerCAmelCase_(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file) -> int:
    '''Each compression filesystem exposes the decompressed file under the
    archive's basename and its contents match the uncompressed text file.

    NOTE(review): fixture parameter names reconstructed from the body — the
    duplicated originals were invalid; confirm against conftest.
    '''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # the fixture for this protocol is unavailable: skip with its reason
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def lowerCAmelCase_(protocol, zip_jsonl_path, jsonl_gz_path) -> str:
    '''A member inside a zip/gzip archive is addressable through a chained
    `protocol://member::archive` fsspec path.'''
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def lowerCAmelCase_(hf_api, hf_token, hf_private_dataset_repo_txt_data, text_file) -> str:
    '''HfFileSystem lists, stats and reads files of a private Hub dataset repo.

    NOTE(review): fixture parameter names reconstructed from the body; confirm
    against conftest.
    '''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def lowerCAmelCase_ ( ) -> Optional[int]:
    '''Reloading datasets.filesystems warns when a compression protocol is
    already registered by someone else.'''
    protocol = "bz2"
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    # NOTE(review): second/third args reconstructed (None implementation,
    # clobber=True) — confirm against the datasets test suite.
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 281 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
# Emit INFO-level messages from the transformers logging utilities.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
snake_case : List[Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Tuple ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Optional[int] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Optional[Any] ) -> str:
    '''Split each encoder layer's fused qkv projection weight into separate
    query / key / value slices.

    NOTE(review): the body reads ``state_dict`` and ``encoder_config`` which
    the (duplicated) ``_snake_case`` parameters do not bind, and the slices are
    assigned to ``__magic_name__`` instead of being written back into the state
    dict -- identifiers look machine-mangled; verify before running.'''
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        __magic_name__ : Optional[Any] = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        # first hidden_size rows -> query weight
        __magic_name__ : List[Any] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        # middle hidden_size rows -> key weight
        __magic_name__ : List[str] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        # last hidden_size rows -> value weight
        __magic_name__ : Optional[int] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def lowerCAmelCase_ ( _snake_case : Any , _snake_case : str , _snake_case : Tuple ) -> Union[str, Any]:
    '''Pop one key from a dict and re-insert its value under a new key.

    NOTE(review): the body references ``dct`` and ``val`` that the mangled,
    duplicated ``_snake_case`` parameters do not bind -- compare with the
    upstream ``rename_key(dct, old, new)`` helper before running.'''
    __magic_name__ : List[Any] = dct.pop(_snake_case )
    __magic_name__ : Union[str, Any] = val
def lowerCAmelCase_ ( _snake_case : Dict ) -> Tuple:
    '''Download a demo image matching the checkpoint flavour: a handwritten
    word crop for "handwritten" checkpoints, a printed receipt for
    "printed"/"stage1" checkpoints.

    NOTE(review): the body tests ``checkpoint_url`` and returns ``im`` while
    assigning the URL/image to ``__magic_name__`` -- identifiers look
    machine-mangled; verify before running.'''
    if "handwritten" in checkpoint_url:
        __magic_name__ : Dict = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        __magic_name__ : str = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    __magic_name__ : List[str] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert("RGB" )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) -> List[Any]:
    '''Convert an original fairseq TrOCR checkpoint into a HuggingFace
    VisionEncoderDecoderModel (ViT encoder + TrOCR causal-LM decoder), verify
    its logits against hard-coded expected values, and save model + processor.

    NOTE(review): throughout this function the assignment targets have been
    replaced by ``__magic_name__`` while the reads use the original names
    (``checkpoint_url``, ``model``, ``state_dict``, ``processor`` ...), and the
    two parameters share one name -- compare with the upstream
    ``convert_tr_ocr_checkpoint`` script before running.'''
    # encoder defaults to ViT-base geometry; decoder to the TrOCR defaults
    __magic_name__ : Union[str, Any] = ViTConfig(image_size=384 , qkv_bias=_snake_case )
    __magic_name__ : Any = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        __magic_name__ : List[str] = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        __magic_name__ : str = 1024
        __magic_name__ : Optional[int] = 4096
        __magic_name__ : Tuple = 24
        __magic_name__ : Union[str, Any] = 16
        __magic_name__ : List[Any] = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        __magic_name__ : List[Any] = False
        __magic_name__ : Any = "relu"
        __magic_name__ : int = 1024
        __magic_name__ : Union[str, Any] = True
        __magic_name__ : int = False
        __magic_name__ : str = False
    # load HuggingFace model
    __magic_name__ : List[Any] = ViTModel(_snake_case , add_pooling_layer=_snake_case )
    __magic_name__ : int = TrOCRForCausalLM(_snake_case )
    __magic_name__ : Optional[Any] = VisionEncoderDecoderModel(encoder=_snake_case , decoder=_snake_case )
    model.eval()
    # load state_dict of original model, rename some keys
    __magic_name__ : List[str] = torch.hub.load_state_dict_from_url(_snake_case , map_location="cpu" , check_hash=_snake_case )["model"]
    __magic_name__ : List[Any] = create_rename_keys(_snake_case , _snake_case )
    for src, dest in rename_keys:
        rename_key(_snake_case , _snake_case , _snake_case )
    read_in_q_k_v(_snake_case , _snake_case )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        __magic_name__ : Optional[int] = state_dict.pop(_snake_case )
        if key.startswith("decoder" ) and "output_projection" not in key:
            __magic_name__ : Optional[Any] = val
        else:
            __magic_name__ : int = val
    # load state dict
    model.load_state_dict(_snake_case )
    # Check outputs on an image
    __magic_name__ : Tuple = ViTImageProcessor(size=encoder_config.image_size )
    __magic_name__ : List[Any] = RobertaTokenizer.from_pretrained("roberta-large" )
    __magic_name__ : List[Any] = TrOCRProcessor(_snake_case , _snake_case )
    __magic_name__ : Tuple = processor(images=prepare_img(_snake_case ) , return_tensors="pt" ).pixel_values
    # verify logits
    __magic_name__ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    __magic_name__ : List[Any] = model(pixel_values=_snake_case , decoder_input_ids=_snake_case )
    __magic_name__ : Tuple = outputs.logits
    # 50265 is the RoBERTa-large vocab size used by the decoder head
    __magic_name__ : Dict = torch.Size([1, 1, 50265] )
    if "trocr-base-handwritten" in checkpoint_url:
        __magic_name__ : List[str] = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        __magic_name__ : Union[str, Any] = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
    elif "trocr-base-printed" in checkpoint_url:
        __magic_name__ : Optional[int] = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
    elif "trocr-large-printed" in checkpoint_url:
        __magic_name__ : int = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
    # stage1 checkpoints have no reference logits to compare against
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , _snake_case , atol=1E-3 ), "First elements of logits not as expected"
    Path(_snake_case ).mkdir(exist_ok=_snake_case )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(_snake_case )
    print(F'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(_snake_case )
if __name__ == "__main__":
    # CLI entry point for the TrOCR checkpoint conversion.
    snake_case : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    # NOTE(review): the parser/args are assigned to ``snake_case`` but read as
    # ``parser``/``args``, and ``convert_tr_ocr_checkpoint`` is not defined in
    # this file -- identifiers look machine-mangled; verify before running.
    snake_case : Union[str, Any] = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 281 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger and the ConvBERT pretrained-config archive map.
# NOTE(review): both values are bound to the same name ``snake_case`` (the dict
# shadows the logger) -- identifiers look machine-mangled; verify intent.
snake_case : Dict = logging.get_logger(__name__)
# maps each published ConvBERT checkpoint to its hosted config.json
snake_case : List[Any] = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( snake_case ):
    '''Configuration class holding ConvBERT hyperparameters: the standard
    BERT-style settings plus the ConvBERT extras (embedding size, head ratio,
    convolution kernel size, group count, classifier dropout).

    NOTE(review): the base-class expression ``snake_case`` resolves to a
    module-level value above (not ``PretrainedConfig``), and ``__init__``
    repeats the parameter name ``_a`` (a SyntaxError) while the body reads
    unbound names (``vocab_size``, ``hidden_size``, ...) -- identifiers look
    machine-mangled; compare with ``transformers.ConvBertConfig``.
    '''
    # model_type identifier used by the auto-config machinery
    UpperCamelCase__ = 'convbert'
    def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a=768 , _a=2 , _a=9 , _a=1 , _a=None , **_a , ):
        '''Store all hyperparameters on the instance after forwarding the
        special-token ids to the parent constructor.'''
        super().__init__(
            pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
        __magic_name__ : Tuple = vocab_size
        __magic_name__ : List[Any] = hidden_size
        __magic_name__ : Union[str, Any] = num_hidden_layers
        __magic_name__ : List[Any] = num_attention_heads
        __magic_name__ : str = intermediate_size
        __magic_name__ : Any = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : Tuple = max_position_embeddings
        __magic_name__ : str = type_vocab_size
        __magic_name__ : List[str] = initializer_range
        __magic_name__ : Tuple = layer_norm_eps
        __magic_name__ : List[Any] = embedding_size
        __magic_name__ : List[Any] = head_ratio
        __magic_name__ : str = conv_kernel_size
        __magic_name__ : Dict = num_groups
        __magic_name__ : str = classifier_dropout
class _snake_case ( snake_case ):
    '''ONNX export configuration: declares the model inputs and which of their
    axes are dynamic (batch / choice / sequence).'''
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        '''Return an ordered mapping from input name to its dynamic axes;
        multiple-choice tasks carry an extra "choice" axis.

        NOTE(review): the axis dict is assigned to ``__magic_name__`` but read
        back as ``dynamic_axis`` -- identifiers look machine-mangled.'''
        if self.task == "multiple-choice":
            __magic_name__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            __magic_name__ : Dict = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 281 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# BigBird tokenizer constants: logger, vocab-file name, hosted sentencepiece
# models, and the maximum input sizes of the published checkpoints.
# NOTE(review): all four values are bound to the same name ``snake_case`` --
# identifiers look machine-mangled; verify intent.
snake_case : str = logging.get_logger(__name__)
snake_case : Tuple = {"vocab_file": "spiece.model"}
snake_case : Dict = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}
# maximum sequence length supported by each checkpoint's position embeddings
snake_case : int = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}
class _snake_case ( snake_case ):
    '''SentencePiece-based BigBird tokenizer.

    NOTE(review): every method signature repeats the parameter name ``_a``
    (a SyntaxError) and assignment targets were replaced by ``__magic_name__``
    while reads keep the original names (``vocab``, ``state``, ``token``,
    ``out_string``, ...) -- identifiers look machine-mangled; compare with
    ``transformers.BigBirdTokenizer`` before relying on this class.
    '''
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = ['input_ids', 'attention_mask']
    UpperCamelCase__ = []
    # Normalize special tokens to AddedToken instances, then load the
    # SentencePiece model from ``vocab_file``.
    def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ):
        __magic_name__ : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
        __magic_name__ : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
        __magic_name__ : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
        __magic_name__ : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
        __magic_name__ : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
        __magic_name__ : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        __magic_name__ : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
        __magic_name__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        __magic_name__ : Optional[int] = vocab_file
        __magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
    # vocab_size: number of pieces known to the SentencePiece model
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        return self.sp_model.get_piece_size()
    # get_vocab: full token->id mapping including added tokens
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Union[str, Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # Pickling support: the SentencePieceProcessor itself is not picklable,
    # so it is dropped here and rebuilt in __setstate__.
    def __getstate__( self ):
        __magic_name__ : Dict = self.__dict__.copy()
        __magic_name__ : Tuple = None
        return state
    def __setstate__( self , _a ):
        __magic_name__ : Optional[int] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __magic_name__ : str = {}
        __magic_name__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    # _tokenize: delegate text segmentation to SentencePiece
    def SCREAMING_SNAKE_CASE ( self , _a ):
        return self.sp_model.encode(_a , out_type=_a )
    # _convert_token_to_id
    def SCREAMING_SNAKE_CASE ( self , _a ):
        return self.sp_model.piece_to_id(_a )
    # _convert_id_to_token
    def SCREAMING_SNAKE_CASE ( self , _a ):
        __magic_name__ : Tuple = self.sp_model.IdToPiece(_a )
        return token
    # convert_tokens_to_string: decode pieces, keeping special tokens verbatim
    def SCREAMING_SNAKE_CASE ( self , _a ):
        __magic_name__ : Dict = []
        __magic_name__ : Dict = ""
        __magic_name__ : List[str] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                __magic_name__ : Tuple = True
                __magic_name__ : List[str] = []
            else:
                current_sub_tokens.append(_a )
                __magic_name__ : Optional[int] = False
        out_string += self.sp_model.decode(_a )
        return out_string.strip()
    # _decode: ids -> text, separating added tokens from sentencepiece pieces
    def SCREAMING_SNAKE_CASE ( self , _a , _a = False , _a = None , _a = True , **_a , ):
        __magic_name__ : Optional[Any] = kwargs.pop("use_source_tokenizer" , _a )
        __magic_name__ : Optional[int] = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        __magic_name__ : str = []
        __magic_name__ : Dict = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(_a ) )
                    __magic_name__ : Union[str, Any] = []
                sub_texts.append(_a )
            else:
                current_sub_text.append(_a )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(_a ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            __magic_name__ : Optional[int] = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(_a ) )
        else:
            __magic_name__ : Optional[Any] = "".join(_a )
        __magic_name__ : Union[str, Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            __magic_name__ : Any = self.clean_up_tokenization(_a )
            return clean_text
        else:
            return text
    # save_vocabulary: copy (or serialize) the sentencepiece model to disk
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : Optional[int] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , "wb" ) as fi:
                __magic_name__ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
    # build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP])
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ : Dict = [self.cls_token_id]
        __magic_name__ : int = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep
    # get_special_tokens_mask: 1 for special tokens, 0 for sequence tokens
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is None:
            return [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
    # create_token_type_ids_from_sequences: 0s for segment A, 1s for segment B
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        __magic_name__ : Dict = [self.sep_token_id]
        __magic_name__ : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 281 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ) -> str:
    '''Download the LAVIS demo picture (the Merlion statue) and return it as an
    RGB PIL image.

    Returns:
        PIL.Image.Image: the demo image, converted to RGB.
    '''
    # Fix: the previous body passed the undefined name ``_snake_case`` as both
    # the URL and the ``stream`` flag and returned the undefined ``image``,
    # raising NameError on first call.
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    # stream=True lets PIL read straight from the response's raw file object
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def lowerCAmelCase_ ( _snake_case : str ) -> Union[str, Any]:
    '''Build the (source_key, destination_key) rename table that maps original
    LAVIS BLIP-2 state-dict names onto the HuggingFace layout.

    Args:
        _snake_case: the HF BLIP-2 config; only
            ``vision_config.num_hidden_layers`` is read.

    Returns:
        list: ``(old_name, new_name)`` tuples -- vision-encoder top-level keys
        first, then one group of keys per vision layer, then the QFormer
        layernorm keys.
    '''
    # Fix: the previous body appended to ``rename_keys`` and iterated over
    # ``config`` although neither name was bound (the list had been assigned to
    # ``__magic_name__`` and the config arrives as ``_snake_case``), so the
    # function raised NameError on first use.
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
    for i in range(_snake_case.vision_config.num_hidden_layers ):
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
    # fmt: on
    return rename_keys
def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) -> int:
    '''Pop one key from a dict and re-insert its value under a new key.

    NOTE(review): the body references ``dct`` and ``val`` that the mangled,
    duplicated ``_snake_case`` parameters do not bind -- compare with the
    upstream ``rename_key(dct, old, new)`` helper before running.'''
    __magic_name__ : Tuple = dct.pop(_snake_case )
    __magic_name__ : int = val
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Optional[Any] ) -> Dict:
    '''For each vision layer, rebuild the fused qkv bias from the original
    separate q and v biases (k has no bias, so zeros are inserted for it).

    NOTE(review): the body reads ``config``, ``state_dict``, ``q_bias``,
    ``v_bias`` and ``qkv_bias`` which the (duplicated) ``_snake_case``
    parameters do not bind -- identifiers look machine-mangled; verify.'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        __magic_name__ : List[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        __magic_name__ : Optional[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        __magic_name__ : Optional[int] = torch.cat((q_bias, torch.zeros_like(_snake_case , requires_grad=_snake_case ), v_bias) )
        __magic_name__ : Union[str, Any] = qkv_bias
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : str ) -> int:
    '''Assemble the HF BLIP-2 config for a given model name: picks the image
    size (364 for COCO-finetuned checkpoints, else 224) and the matching OPT
    or flan-T5 text config.

    NOTE(review): the body reads ``model_name`` and returns
    ``(config, image_size)`` while assignments target ``__magic_name__``, and
    callers below pass ``eos_token_id=`` as a keyword this mangled signature
    does not declare -- verify against the upstream ``get_blip2_config``.'''
    __magic_name__ : List[Any] = 364 if "coco" in model_name else 224
    __magic_name__ : Union[str, Any] = BlipaVisionConfig(image_size=_snake_case ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        __magic_name__ : List[str] = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=_snake_case ).to_dict()
    elif "opt-6.7b" in model_name:
        __magic_name__ : Any = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=_snake_case ).to_dict()
    elif "t5-xl" in model_name:
        __magic_name__ : Dict = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        __magic_name__ : int = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    __magic_name__ : List[Any] = BlipaConfig(vision_config=_snake_case , text_config=_snake_case )
    return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : str=None , _snake_case : Dict=False ) -> List[Any]:
    '''Convert an original LAVIS BLIP-2 checkpoint to the HuggingFace format:
    rename state-dict keys, rebuild fused qkv biases, verify logits against
    the original model on a demo image, generate a caption with both models,
    and optionally save / push the converted model and processor.

    NOTE(review): throughout this function the assignment targets were
    replaced by ``__magic_name__`` while reads keep the original names
    (``model_name``, ``tokenizer``, ``hf_model``, ``state_dict``, ...) --
    compare with the upstream ``convert_blip2_checkpoint`` before running.'''
    __magic_name__ : Optional[int] = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    # id of the newline token, used as eos for OPT variants
    __magic_name__ : List[Any] = tokenizer("\n" , add_special_tokens=_snake_case ).input_ids[0]
    __magic_name__ , __magic_name__ : Tuple = get_blipa_config(_snake_case , eos_token_id=_snake_case )
    __magic_name__ : Union[str, Any] = BlipaForConditionalGeneration(_snake_case ).eval()
    # maps the HF model name to the (LAVIS name, model_type) pair
    __magic_name__ : Any = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    __magic_name__ , __magic_name__ : Union[str, Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    __magic_name__ : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
    __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = load_model_and_preprocess(
        name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    __magic_name__ : Dict = original_model.state_dict()
    __magic_name__ : str = create_rename_keys(_snake_case )
    for src, dest in rename_keys:
        rename_key(_snake_case , _snake_case , _snake_case )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        __magic_name__ : Any = state_dict.pop(_snake_case )
        if key.startswith("Qformer.bert" ):
            __magic_name__ : Optional[int] = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            __magic_name__ : Any = key.replace("self" , "attention" )
        if "opt_proj" in key:
            __magic_name__ : Union[str, Any] = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            __magic_name__ : Optional[int] = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            __magic_name__ : List[str] = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            __magic_name__ : Tuple = key.replace("t5" , "language" )
        __magic_name__ : Dict = val
    # read in qv biases
    read_in_q_v_bias(_snake_case , _snake_case )
    # only the qformer position_ids buffer is expected to be unmatched
    __magic_name__ , __magic_name__ : Tuple = hf_model.load_state_dict(_snake_case , strict=_snake_case )
    assert len(_snake_case ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    __magic_name__ : List[Any] = load_demo_image()
    __magic_name__ : Tuple = vis_processors["eval"](_snake_case ).unsqueeze(0 ).to(_snake_case )
    __magic_name__ : Dict = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(_snake_case )
    # create processor
    __magic_name__ : Optional[Any] = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_snake_case , image_std=_snake_case )
    __magic_name__ : Dict = BlipaProcessor(image_processor=_snake_case , tokenizer=_snake_case )
    __magic_name__ : Union[str, Any] = processor(images=_snake_case , return_tensors="pt" ).pixel_values.to(_snake_case )
    # make sure processor creates exact same pixel values
    assert torch.allclose(_snake_case , _snake_case )
    original_model.to(_snake_case )
    hf_model.to(_snake_case )
    with torch.no_grad():
        if "opt" in model_name:
            __magic_name__ : List[Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            __magic_name__ : Optional[int] = hf_model(_snake_case , _snake_case ).logits
        else:
            __magic_name__ : int = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            # -100 masks padding out of the T5 loss
            __magic_name__ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            __magic_name__ : List[str] = hf_model(_snake_case , _snake_case , labels=_snake_case ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        __magic_name__ : List[str] = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case )
        assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        __magic_name__ : Tuple = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case )
    else:
        # cast to same type
        __magic_name__ : str = logits.dtype
        assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    __magic_name__ : Optional[int] = ""
    __magic_name__ : Dict = tokenizer(_snake_case , return_tensors="pt" ).input_ids.to(_snake_case )
    __magic_name__ : int = original_model.generate({"image": original_pixel_values} )
    __magic_name__ : Optional[Any] = hf_model.generate(
        _snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , _snake_case )
    # strip the prompt from the HF output before decoding
    __magic_name__ : Tuple = input_ids.shape[1]
    __magic_name__ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case )
    __magic_name__ : Union[str, Any] = [text.strip() for text in output_text]
    print("HF generation:" , _snake_case )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_snake_case )
        hf_model.save_pretrained(_snake_case )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the BLIP-2 checkpoint conversion.
    snake_case : Any = argparse.ArgumentParser()
    # supported HF model names (see the mapping inside the convert function)
    snake_case : Union[str, Any] = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    # NOTE(review): parser/choices/args are assigned to ``snake_case`` but read
    # under their original names, and ``convert_blipa_checkpoint`` is not
    # defined in this file -- identifiers look machine-mangled; verify.
    snake_case : int = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 1 |
def lowerCAmelCase_ ( _snake_case : list ) -> list:
    '''Sort a list in place with binary insertion sort and return it.

    For each element, a binary search over the already-sorted prefix finds the
    insertion point (O(log i) comparisons), then the prefix tail is shifted one
    slot right to make room. The sort is stable (equal elements keep order).

    Args:
        _snake_case: the list to sort; elements must be mutually comparable.

    Returns:
        list: the same list object, sorted in ascending order.
    '''
    # Fix: the previous body iterated ``range(1, <list>)`` (TypeError) and read
    # undefined names (``collection``, ``val``, ``low``, ``high``) because all
    # assignment targets had been replaced by ``__magic_name__``.
    length = len(_snake_case )
    for i in range(1 , length ):
        val = _snake_case[i]
        low = 0
        high = i - 1
        # binary search for the leftmost position after any equal elements
        while low <= high:
            mid = (low + high) // 2
            if val < _snake_case[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift [low, i) one slot right, then drop the element into place
        for j in range(i , low , -1 ):
            _snake_case[j] = _snake_case[j - 1]
        _snake_case[low] = val
    return _snake_case
if __name__ == "__main__":
    # CLI entry point: read a comma-separated list of integers from stdin,
    # sort it with the binary insertion sort defined above, and print it.
    # Fix: the parsed values were previously assigned to ``snake_case`` but
    # read back under the undefined names ``user_input``/``unsorted``, and the
    # sort was invoked via ``binary_insertion_sort``, a name this file never
    # defines (the function above is named ``lowerCAmelCase_``).
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowerCAmelCase_(unsorted))
| 281 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# PhoBERT tokenizer constants: logger, vocab/merges file names, hosted file
# locations, and the maximum input sizes of the published checkpoints.
# NOTE(review): all four values are bound to the same name ``snake_case`` --
# identifiers look machine-mangled; verify intent.
snake_case : Dict = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
snake_case : Dict = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
# maximum sequence length supported by each checkpoint's position embeddings
snake_case : Union[str, Any] = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( _snake_case : str ) -> Union[str, Any]:
    '''Return the set of adjacent symbol pairs occurring in a word.

    Args:
        _snake_case: the word, given as a sequence of symbols (a string, or a
            tuple of string tokens as used by the BPE routine below).

    Returns:
        set: every ``(previous_symbol, symbol)`` bigram of the word.
    '''
    # Fix: the previous body assigned every intermediate to ``__magic_name__``
    # while reading the undefined names ``word``, ``prev_char`` and ``pairs``,
    # which raised NameError on first call.
    pairs = set()
    prev_char = _snake_case[0]
    for char in _snake_case[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
__magic_name__ : Dict = vocab_file
__magic_name__ : Tuple = merges_file
__magic_name__ : List[Any] = {}
__magic_name__ : List[Any] = 0
__magic_name__ : Tuple = 1
__magic_name__ : int = 2
__magic_name__ : Union[str, Any] = 3
self.add_from_file(_a )
__magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(_a , encoding="utf-8" ) as merges_handle:
__magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
__magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
__magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
__magic_name__ : Optional[int] = {}
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
__magic_name__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[Any] = [self.sep_token_id]
__magic_name__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _a ):
if token in self.cache:
return self.cache[token]
__magic_name__ : List[Any] = tuple(_a )
__magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__magic_name__ : Any = get_pairs(_a )
if not pairs:
return token
while True:
__magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ , __magic_name__ : List[str] = bigram
__magic_name__ : List[str] = []
__magic_name__ : List[str] = 0
while i < len(_a ):
try:
__magic_name__ : Any = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : Union[str, Any] = tuple(_a )
__magic_name__ : Optional[int] = new_word
if len(_a ) == 1:
break
else:
__magic_name__ : List[Any] = get_pairs(_a )
__magic_name__ : Optional[int] = "@@ ".join(_a )
__magic_name__ : Tuple = word[:-4]
__magic_name__ : str = word
return word
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = []
__magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
return split_tokens
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Token -> id; out-of-vocabulary tokens map to the unk token's id.
        return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Id -> token; unknown ids map to the unk token string.
        return self.decoder.get(_a , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Optional[int] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__magic_name__ : Union[str, Any] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
copyfile(self.merges_file , _a )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE ( self , _a ):
if isinstance(_a , _a ):
try:
with open(_a , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
__magic_name__ : List[Any] = f.readlines()
for lineTmp in lines:
__magic_name__ : Optional[Any] = lineTmp.strip()
__magic_name__ : Union[str, Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
__magic_name__ : Optional[int] = line[:idx]
__magic_name__ : Dict = len(self.encoder )
| 281 | 1 |
def lowerCAmelCase_ ( a : int , b : int ) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm)."""
    # NOTE(review): the original reused `_snake_case` for both parameters
    # (a SyntaxError) and never updated a/b inside the loop.
    while a != 0:
        a , b = b % a, a
    return b
def lowerCAmelCase_ ( a : int , m : int ) -> int:
    """Return the modular inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm.  Raises ValueError when ``a`` and
    ``m`` are not coprime (no inverse exists).
    """
    from math import gcd  # local import keeps this block self-contained

    if gcd(a , m ) != 1:
        msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    # (u1, u2, u3) and (v1, v2, v3) track Bezout coefficients for a and m.
    ua , ua2 , ua3 = 1, 0, a
    va , va2 , va3 = 0, 1, m
    while va3 != 0:
        q = ua3 // va3
        ua , ua2 , ua3 , va , va2 , va3 = (ua - q * va), (ua2 - q * va2), (ua3 - q * va3), va, va2, va3
    return ua % m
| 281 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( _snake_case : str = "laptop" ) -> DataFrame:
    '''simple docstring'''
    # Scrape Amazon.in search results for a product into a DataFrame with
    # title, link, price, rating, MRP and discount columns.
    # NOTE(review): most locals below are bound to `__magic_name__` but read
    # under other names (product, soup, data_frame, product_title, ...), so
    # this function cannot run as written — names need restoring. TODO confirm
    # against the upstream scraper before use.
    __magic_name__ : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
    # Browser-like headers to avoid a bot block; Accept-Language pins results.
    __magic_name__ : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    __magic_name__ : Tuple = BeautifulSoup(requests.get(_snake_case , headers=_snake_case ).text )
    # Initialize a Pandas dataframe with the column titles
    __magic_name__ : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            __magic_name__ : Dict = item.ha.text
            __magic_name__ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
            __magic_name__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                __magic_name__ : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # No rating element for this listing.
                __magic_name__ : Dict = "Not available"
            try:
                __magic_name__ : Optional[int] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                # No list-price (MRP) element for this listing.
                __magic_name__ : List[str] = ""
            try:
                # Discount percentage = (MRP - price) / MRP * 100.
                __magic_name__ : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                __magic_name__ : str = float("nan" )
        except AttributeError:
            pass
        __magic_name__ : Optional[int] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __magic_name__ : Optional[Any] = " "
        __magic_name__ : str = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Demo run: scrape one product's listings and dump them to CSV.
    # NOTE(review): `get_amazon_product_data` and `product` are not defined in
    # this file (the scraper above is `lowerCAmelCase_` and the value is bound
    # to `snake_case`) — confirm the intended names before running.
    snake_case : Any = "headphones"
    get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 281 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowerCAmelCase_ ( sequences , padding_value , padding_side , sequence_length ):
    """Pad/truncate a batch of sequences to ``sequence_length`` and return nested lists.

    A tuple ``padding_value`` signals entries of shape (len, 2) — e.g. span
    boundaries — otherwise entries are flat lists.  ``padding_side`` == "right"
    places data at the front (pad at the end); anything else pads on the left.
    """
    # NOTE(review): the original reused `_snake_case` for all four parameters
    # and never wrote into `out_tensor`; names restored from the usage below.
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        trunc = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(trunc )] = trunc
        else:
            out_tensor[i, sequence_length - len(trunc ) :] = trunc
    return out_tensor.tolist()
def lowerCAmelCase_ ( char ) -> bool:
    """Return True if the single character ``char`` is punctuation.

    Treats the four ASCII symbol ranges as punctuation (BERT convention:
    includes characters like ``$`` and ``^`` that Unicode classes as symbols),
    then falls back to the Unicode "P*" categories.
    """
    # Fix: the original bound the code point / category to throwaway names
    # and read the unbound `cp` / `cat`.
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("P" ):
        return True
    return False
@dataclass
class _snake_case ( snake_case ):
    """Data collator padding token fields, labels, ner_tags and entity spans.

    NOTE(review): field names were obfuscated (every field is
    `UpperCamelCase__`, so later assignments shadow earlier ones); the values
    suggest tokenizer / padding / max_length / pad_to_multiple_of /
    label_pad_token_id / return_tensors — confirm against the original.
    """
    UpperCamelCase__ = 42
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    UpperCamelCase__ = None
    UpperCamelCase__ = -100
    UpperCamelCase__ = "pt"
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Collate one batch: tokenizer-pad the token fields, then pad labels /
        # ner_tags / original_entity_spans to the padded entity length and
        # convert everything to torch tensors.
        # NOTE(review): several locals below are bound to `__magic_name__` but
        # read as label_name/labels/batch/sequence_length/padding_side, so the
        # method cannot run until those names are restored.
        import torch
        __magic_name__ : List[str] = "label" if "label" in features[0].keys() else "labels"
        __magic_name__ : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Pad without tensor conversion when labels still need padding below.
        __magic_name__ : Optional[int] = self.tokenizer.pad(
            _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        # Pad label-like fields to the padded entity_ids length.
        __magic_name__ : Dict = torch.tensor(batch["entity_ids"] ).shape[1]
        __magic_name__ : List[Any] = self.tokenizer.padding_side
        if padding_side == "right":
            __magic_name__ : str = [
                list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
            ]
        else:
            __magic_name__ : int = [
                [self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
            ]
        __magic_name__ : Dict = [feature["ner_tags"] for feature in features]
        __magic_name__ : List[Any] = padding_tensor(_a , -1 , _a , _a )
        __magic_name__ : Any = [feature["original_entity_spans"] for feature in features]
        __magic_name__ : Any = padding_tensor(_a , (-1, -1) , _a , _a )
        # NOTE(review): torch.intaa looks like an obfuscated dtype (torch.int64?) — confirm.
        __magic_name__ : List[Any] = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 281 |
from __future__ import annotations
class _snake_case :
def __init__( self , _a ):
__magic_name__ : Optional[Any] = data
__magic_name__ : Node | None = None
__magic_name__ : Node | None = None
def lowerCAmelCase_ ( tree : Node | None ) -> None:  # In Order traversal of the tree
    """Print each node's value using an in-order (left, root, right) walk."""
    # Fix: the original recursed via the undefined name `display`; recurse
    # through this definition's actual name instead.
    if tree:
        lowerCAmelCase_(tree.left )
        print(tree.data )
        lowerCAmelCase_(tree.right )
def lowerCAmelCase_ ( tree : Node | None ) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 for empty)."""
    # Fix: the original recursed via the undefined name `depth_of_tree`.
    return 1 + max(lowerCAmelCase_(tree.left ) , lowerCAmelCase_(tree.right ) ) if tree else 0
def lowerCAmelCase_ ( tree : Node ) -> bool:
    """Return True when every node has either zero or two children."""
    # Fix: the original recursed via the undefined name `is_full_binary_tree`.
    if not tree:
        return True
    if tree.left and tree.right:
        return lowerCAmelCase_(tree.left ) and lowerCAmelCase_(tree.right )
    else:
        return not tree.left and not tree.right
def lowerCAmelCase_ ( ) -> None:  # Main function for testing.
    """Build a small sample tree, then print fullness, depth and contents."""
    # Fix: the original created nine nodes but never attached them to each
    # other, so the "tree" was just a root.  NOTE(review): the helper names
    # below (Node, is_full_binary_tree, depth_of_tree, display) must match the
    # definitions earlier in this file — confirm after de-obfuscation.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
    # Fix: the guard previously called the undefined name `main()`.
    lowerCAmelCase_()
| 281 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
# NOTE(review): this script downloads the full facebook/wmt19-en-de tokenizer
# and config from the Hub at import time — it is a one-off build tool, not a
# library module.
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : List[str] = "facebook/wmt19-en-de"
snake_case : Dict = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : List[str] = FSMTConfig.from_pretrained(mname)
# Shrink every architectural dimension to the minimum that still runs.
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
snake_case : int = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
# NOTE(review): values are bound to `snake_case` but read as mname/config/
# tiny_model/batch/outputs/mname_tiny — names need restoring before this runs.
snake_case : Optional[Any] = tokenizer(["Making tiny model"], return_tensors="pt")
snake_case : List[str] = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
snake_case : Dict = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 281 |
def lowerCAmelCase_ ( input_string : str , pattern : str ) -> bool:
    """Return True if ``input_string`` fully matches ``pattern``.

    Supports "." (any single character) and "*" (zero or more of the
    preceding element), via bottom-up dynamic programming.
    """
    # NOTE(review): the original reused `_snake_case` for both parameters and
    # never wrote its results into the dp table; both are fixed here.
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp[i][j] == 1 iff the first i input chars match the first j pattern chars.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # Empty string matches empty pattern.
    dp[0][0] = 1
    # Empty pattern never matches a non-empty string.
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # Empty string matches patterns like a*, a*b*, ... (each "*" erases a pair).
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # Fill the rest bottom-up.
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # Zero occurrences of the starred element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # One more occurrence of the starred element.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings (interactive prompts left disabled):
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # Fix: the example values were bound to `snake_case` while the call below
    # read `input_string`/`pattern`, and the matcher was called by the
    # undefined name `match_pattern`.
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(F"{input_string} matches the given pattern {pattern}")
    else:
        print(F"{input_string} does not match with the given pattern {pattern}")
| 281 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline by reusing the components of a
    # pretrained txt2img UnCLIP pipeline plus a CLIP vision encoder.
    snake_case : str = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    snake_case : Union[str, Any] = parser.parse_args()
    # NOTE(review): argparse exposes "--txt2img_unclip" as args.txt2img_unclip;
    # `args.txtaimg_unclip` below (and the txtaimg/imgaimg names) look like
    # obfuscation damage ("2" -> "a") — confirm and restore before running.
    snake_case : Dict = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
    snake_case : Tuple = CLIPImageProcessor()
    snake_case : Any = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    snake_case : List[str] = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
| 281 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class _snake_case :
        """Stub standing in for PIL.Image when the vision extra is unavailable."""
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_a , **_a ):
            # No-op placeholder so references don't fail at import time.
            # NOTE(review): `*_a, **_a` reuses one name for both parameter
            # packs, which is a SyntaxError — restore distinct names.
            pass
def lowerCAmelCase_ ( image : Image ) -> str:
    """Return a short (10 hex chars) MD5 fingerprint of ``image``'s raw bytes.

    MD5 is fine here: the hash only serves as a compact, stable id for test
    fixtures, not for security.
    """
    # Fix: the original called the nonexistent `hashlib.mda` and returned
    # through the unbound name `m`.
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def lowerCAmelCase_ ( mask : Image ) -> Dict:
    """Summarize a mask as {"hash": short-md5, "shape": array shape} for tests."""
    # Fix: the original bound the array to a throwaway name and read the
    # unbound `npimg`/`shape`.  NOTE(review): `hashimage` must resolve to the
    # md5 helper defined just above — confirm after de-obfuscation.
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for mask-generation (SAM) models."""
    # Model mappings under test (torch and TF); empty when the mapping is absent.
    UpperCamelCase__ = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    UpperCamelCase__ = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
        # Build the pipeline under test plus sample image paths.
        # NOTE(review): the pipeline is bound to `__magic_name__` but returned
        # as `image_segmenter`, and the three parameters share the name `_a`
        # (a SyntaxError) — restore before running.
        __magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def SCREAMING_SNAKE_CASE ( self ):
        pass
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ):
        # End-to-end run against the real SAM checkpoint; masks are reduced to
        # short hashes so the expected list stays readable.
        # NOTE(review): results accumulate into the misspelled, unbound name
        # `new_outupt` while the list is bound to `__magic_name__` — restore.
        __magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        __magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Dict = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ] , )
        # fmt: on
    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Same checkpoint but with a high pred_iou_thresh, which keeps only the
        # top-scoring masks.
        __magic_name__ : str = "facebook/sam-vit-huge"
        __magic_name__ : str = pipeline("mask-generation" , model=_a )
        __magic_name__ : Tuple = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Any = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ] , )
| 281 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class _snake_case ( snake_case ):
    """Configuration class for Funnel Transformer models."""

    # NOTE(review): the two class attributes below were both obfuscated to
    # `UpperCamelCase__` (the second shadows the first); upstream they are
    # `model_type` and `attribute_map` — kept byte-identical here.
    UpperCamelCase__ = 'funnel'
    UpperCamelCase__ = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
    }

    def __init__( self , vocab_size=30_522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3_072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        # NOTE(review): the original signature reused `_a` for every parameter
        # (a SyntaxError) and never assigned to `self`; parameter names were
        # restored from the attribute assignment order below.
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # Default: run every encoder block exactly once.
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )

    @property
    def num_hidden_layers( self ):
        # Total encoder layers = sum of the per-block layer counts.  (The
        # property name is required by the setter decorator below.)
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )

    @property
    def num_blocks( self ):
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks( self , value ):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 281 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
snake_case : List[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
snake_case : Any = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
snake_case : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """ROUGE metric wrapping Google Research's rouge_score package."""
    def SCREAMING_SNAKE_CASE ( self ):
        # Metric metadata: string predictions/references, citation and docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ] , )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a=None , _a=True , _a=False ):
        # Score each (reference, prediction) pair; either aggregate with a
        # bootstrap aggregator or return per-pair score lists keyed by rouge type.
        # NOTE(review): locals below are bound to `__magic_name__` but read as
        # rouge_types/scorer/aggregator/scores/result, and several `_a`
        # references should use those locals — restore before running.
        if rouge_types is None:
            __magic_name__ : str = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        __magic_name__ : List[str] = rouge_scorer.RougeScorer(rouge_types=_a , use_stemmer=_a )
        if use_aggregator:
            __magic_name__ : Dict = scoring.BootstrapAggregator()
        else:
            __magic_name__ : str = []
        for ref, pred in zip(_a , _a ):
            __magic_name__ : Union[str, Any] = scorer.score(_a , _a )
            if use_aggregator:
                aggregator.add_scores(_a )
            else:
                scores.append(_a )
        if use_aggregator:
            __magic_name__ : Any = aggregator.aggregate()
        else:
            __magic_name__ : List[Any] = {}
            for key in scores[0]:
                __magic_name__ : str = [score[key] for score in scores]
        return result
| 281 | 1 |
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=snake_case ):
    """Dummy object raising a helpful error whenever `onnx` is not installed."""
    # Backend(s) whose absence this placeholder reports.
    UpperCamelCase__ = ['onnx']
    def __init__( self , *_a , **_a ):
        # NOTE(review): `*_a, **_a` reuses one name for both parameter packs
        # (a SyntaxError) — restore distinct names.
        requires_backends(self , ["onnx"] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *_a , **_a ):
        # Stand-in for from_config.
        requires_backends(cls , ["onnx"] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , *_a , **_a ):
        # Stand-in for from_pretrained.
        requires_backends(cls , ["onnx"] )
| 281 |
snake_case : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCAmelCase_ ( data : bytes ) -> bytes:
    """Encode ``data`` to Base64 and return the encoded bytes.

    Raises TypeError for non-bytes input, mirroring ``base64.b64encode``.
    """
    # NOTE(review): the original bound the bit string / padding to throwaway
    # names and read the unbound `binary_stream`/`padding`; fixed here.  The
    # alphabet is inlined so this function is self-contained.
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    # One long string of bits, 8 per input byte.
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # Each "=" stands for two bits of zero-fill.
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Zero-fill so the bit count becomes a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Map every 6-bit group to its Base64 character.
    return (
        "".join(
            charset[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowerCAmelCase_ ( encoded_data : str ) -> bytes:
    """Decode a Base64 string (or ASCII bytes) and return the raw bytes.

    Raises TypeError for other input types and ValueError for non-ASCII bytes
    input; malformed Base64 trips the asserts below.
    """
    # NOTE(review): the original read the unbound names `encoded_data`,
    # `padding` and `binary_stream`, and referenced an undefined charset
    # constant; both are fixed here.
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in charset for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in charset for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding, then drop the 2 fill bits contributed per "=".
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(charset.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(charset.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    # Re-group the bit string into bytes.
    decoded = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 281 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : List[Any] = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( snake_case ):
    """Configuration class for a SEW (Squeezed-and-Efficient Wav2Vec) speech model.

    NOTE(review): identifiers in this file look machine-obfuscated; the base
    class `snake_case` is presumed to be `PretrainedConfig` -- confirm against
    the real imports. The original `__init__` repeated the parameter name `_a`
    (a SyntaxError) and stored every value in a throwaway local instead of on
    `self`; both defects are fixed here using the upstream parameter names, in
    the original default order, so the external keyword interface is restored.
    """

    UpperCamelCase__ = 'sew'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Store the conv specs as lists so the config is JSON-serializable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # All three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # Overall stride of the convolutional feature encoder
        # (product of the per-layer strides).
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 281 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _snake_case ( unittest.TestCase ):
    """Model tester that builds a tiny RobertaPreLayerNorm config and random inputs.

    NOTE(review): identifiers look machine-obfuscated. `__init__` repeats the
    parameter name `_a`, which is a SyntaxError as written, and every
    assignment below stores into the throwaway local `__magic_name__` instead
    of the `self.*` attribute the right-hand side names -- the attributes read
    later (self.batch_size, self.vocab_size, ...) are never actually set.
    The upstream class is presumably FlaxRobertaPreLayerNormModelTester (it is
    referenced under that name at the test class below) -- confirm.
    """

    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
        __magic_name__ : List[Any] = parent
        __magic_name__ : Optional[Any] = batch_size
        __magic_name__ : Dict = seq_length
        __magic_name__ : Union[str, Any] = is_training
        __magic_name__ : Optional[Any] = use_attention_mask
        __magic_name__ : Optional[Any] = use_token_type_ids
        __magic_name__ : int = use_labels
        __magic_name__ : List[Any] = vocab_size
        __magic_name__ : Union[str, Any] = hidden_size
        __magic_name__ : Optional[Any] = num_hidden_layers
        __magic_name__ : int = num_attention_heads
        __magic_name__ : Any = intermediate_size
        __magic_name__ : List[Any] = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[Any] = max_position_embeddings
        __magic_name__ : Tuple = type_vocab_size
        __magic_name__ : List[str] = type_sequence_label_size
        __magic_name__ : Dict = initializer_range
        __magic_name__ : List[Any] = num_choices

    # Builds (config, input_ids, token_type_ids, attention_mask) for one test.
    # NOTE(review): all three methods below share the obfuscated name
    # SCREAMING_SNAKE_CASE, so only the last definition survives on the class.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : List[Any] = None
        if self.use_attention_mask:
            __magic_name__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ : str = None
        if self.use_token_type_ids:
            __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ : List[str] = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    # Repackages the prepared inputs as the (config, inputs_dict) pair the
    # common Flax test mixin expects.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : int = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = config_and_inputs
        __magic_name__ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    # Decoder variant: also fabricates encoder hidden states / attention mask
    # for cross-attention tests.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Optional[int] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = config_and_inputs
        __magic_name__ : Tuple = True
        __magic_name__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _snake_case ( snake_case , unittest.TestCase ):
    """Common-mixin test suite for the Flax RobertaPreLayerNorm model family.

    NOTE(review): the two class attributes below share the obfuscated name
    UpperCamelCase__, so the second assignment overwrites the first
    (upstream these are `test_head_masking` and `all_model_classes`).
    """

    UpperCamelCase__ = True
    UpperCamelCase__ = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    # setUp: attach the model tester.
    # NOTE(review): `FlaxRobertaPreLayerNormModelTester` is not defined under
    # that name above (the tester class is named `_snake_case`) -- the
    # obfuscation broke this reference.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )

    # Smoke-test from_pretrained for every model class.
    # NOTE(review): `from_pt=_a`, `model`, and the `_a` passed to
    # assertIsNotNone are undefined here; the assignment targets were lost
    # during obfuscation.
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        for model_class_name in self.all_model_classes:
            __magic_name__ : Optional[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
            __magic_name__ : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration tests comparing model outputs against golden slices.

    NOTE(review): `jnp.intaa` / `np.floataa` look like mangled `int32` /
    `float32` (digits replaced during obfuscation), and the `_a` arguments
    as well as `output` are undefined because assignment targets were lost
    -- confirm against the upstream test file.
    """

    # Masked-LM head: check output shape and a 3x3 logit slice.
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Dict = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Union[str, Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : List[str] = model(_a )[0]
        __magic_name__ : str = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , _a )
        # compare the actual values for a slice.
        __magic_name__ : List[str] = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )

    # Base model: check a 3x3 hidden-state slice.
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Tuple = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : Tuple = model(_a )[0]
        # compare the actual values for a slice.
        __magic_name__ : Dict = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 281 | 1 |
def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : str ) -> Optional[Any]:
'''simple docstring'''
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(_snake_case ):
for j in range(_snake_case ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) , end="\t" )
else:
print("INF" , end="\t" )
print()
def floyd_warshall(graph, v):
    """All-pairs shortest paths over a v x v adjacency matrix.

    :param graph: 2D matrix; graph[i][j] is the edge weight i -> j
        (float("inf") when there is no edge).
    :param v: number of vertices.
    :return: (dist, v) where dist[i][j] is the shortest distance i -> j.

    Fixes: the obfuscated def repeated the parameter name `_snake_case`
    (a SyntaxError) and the relaxation step assigned its result to a
    throwaway local instead of dist[i][j]; the call site in the __main__
    block already uses the name `floyd_warshall`.
    """
    # Start from a copy of the input weights so `graph` is left untouched.
    dist = [[float("inf" ) for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf" )
                    and dist[k][j] != float("inf" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


lowerCAmelCase_ = floyd_warshall  # keep the obfuscated name bound for compatibility
if __name__ == "__main__":
    # Interactive driver: read vertex/edge counts, build the adjacency matrix,
    # then run Floyd-Warshall.
    # NOTE(review): the obfuscation replaced the real assignment targets with
    # `snake_case`, so `v`, `e`, `graph`, `src`, `dst` and `weight` used below
    # are never actually bound; upstream these lines were
    # `v = ...`, `e = ...`, `graph = ...`, `graph[i][i] = 0.0`,
    # `src/dst/weight = ...` and `graph[src][dst] = weight` -- confirm.
    snake_case : List[str] = int(input("Enter number of vertices: "))
    snake_case : Dict = int(input("Enter number of edges: "))
    snake_case : Any = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        snake_case : Optional[int] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        snake_case : Union[str, Any] = int(input("Enter source:"))
        snake_case : List[str] = int(input("Enter destination:"))
        snake_case : Union[str, Any] = float(input("Enter weight:"))
        snake_case : Tuple = weight
    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 281 |
def lowerCAmelCase_ ( _snake_case : list[list[int | float]] ) -> int:
'''simple docstring'''
__magic_name__ : Any = len(_snake_case )
__magic_name__ : Optional[Any] = len(matrix[0] )
__magic_name__ : Union[str, Any] = min(_snake_case , _snake_case )
for row in range(_snake_case ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _snake_case ):
__magic_name__ : Optional[Any] = matrix[col][row] / matrix[row][row]
for i in range(_snake_case , _snake_case ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__magic_name__ : str = True
for i in range(row + 1 , _snake_case ):
if matrix[i][row] != 0:
__magic_name__ , __magic_name__ : List[str] = matrix[i], matrix[row]
__magic_name__ : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(_snake_case ):
__magic_name__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
| 281 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class _snake_case :
        # Fallback stub used when PIL is unavailable so that references to an
        # image type still resolve at import time; the static method is a no-op.
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_a , **_a ):
            pass
def lowerCAmelCase_ ( image: "Image" ) -> str:
    """Return the first 10 hex digits of the MD5 digest of the image's raw bytes.

    Fixes: `hashlib.mda` (a mangled `hashlib.md5`, an AttributeError at
    runtime) and the lost local names `image`/`m`.
    """
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]


hashimage = lowerCAmelCase_  # call sites in this file use this name
def lowerCAmelCase_ ( mask: "Image" ) -> Dict:
    """Summarize a mask image as a small comparable dict.

    :return: {"hash": 10-char md5 of the image bytes, "shape": numpy shape}.

    Fixes: the obfuscated body lost the local names `npimg`/`shape` and the
    argument passed to `hashimage`.
    """
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


mask_to_test_readable = lowerCAmelCase_  # name used at the call sites below
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for mask-generation (SAM) models.

    NOTE(review): this file looks machine-obfuscated -- the two class
    attributes below share one name (the second overwrites the first;
    upstream: `model_mapping` / `tf_model_mapping`), the methods repeat the
    parameter name `_a` (a SyntaxError as written), and several locals
    (`image_segmenter`, `outputs`, the accumulator appended to via
    `new_outupt`) are referenced without being bound.
    """

    UpperCamelCase__ = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    UpperCamelCase__ = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    # Build the pipeline plus example images for the common pipeline tests.
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
        __magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    # Common-test hook intentionally left empty for this pipeline.
    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def SCREAMING_SNAKE_CASE ( self ):
        pass

    # Full integration test: run SAM and compare hashed masks + scores.
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        __magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Dict = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ] , )
        # fmt: on

    # Same model driven through a threshold (pred_iou_thresh=1) -- fewer masks.
    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : str = "facebook/sam-vit-huge"
        __magic_name__ : str = pipeline("mask-generation" , model=_a )
        __magic_name__ : Tuple = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Any = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ] , )
| 281 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Pattern used by normalize_answer() to strip English articles.
ARTICLES_REGEX = re.compile(R"\b(a|an|the)\b", re.UNICODE)
# Parsed command-line options; populated in the __main__ block.
OPTS = None
# The obfuscated original bound both values to `snake_case`, so the regex was
# immediately overwritten; keep the old name bound for backward compatibility.
snake_case = OPTS
def parse_args():
    """Build and run the SQuAD 2.0 evaluation argument parser.

    Prints help and exits with status 1 when called with no arguments.

    Fixes: the parser was stored in a throwaway local while `parser` was
    used below, and `type`/`default` for --na-prob-thresh / --out-image-dir
    lost their values (float / None); the __main__ block calls `parse_args`.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()


lowerCAmelCase_ = parse_args  # keep the obfuscated name bound for compatibility
def make_qid_to_has_ans(dataset):
    """Map each question id to True/False: does it have at least one gold answer?

    Fixes: the per-question result was assigned to a throwaway local, so the
    dict stayed empty; callers below use the name `make_qid_to_has_ans`.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans


lowerCAmelCase_ = make_qid_to_has_ans  # keep the obfuscated name bound for compatibility
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    Callers below use the name `normalize_answer`. The article pattern is
    applied via re.sub (the re module caches compiled patterns), which makes
    this function self-contained instead of depending on a module-level
    constant that the obfuscation had renamed away.
    """
    def remove_articles(text):
        return re.sub(R"\b(a|an|the)\b" , " " , text )

    def white_space_fix(text):
        return " ".join(text.split() )

    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


lowerCAmelCase_ = normalize_answer  # keep the obfuscated name bound for compatibility
def get_tokens(s):
    """Tokenize a normalized answer; empty/None input yields an empty list.

    Callers below use the name `get_tokens`.
    """
    if not s:
        return []
    return normalize_answer(s ).split()


lowerCAmelCase_ = get_tokens  # keep the obfuscated name bound for compatibility
def compute_exact(a_gold, a_pred):
    """Exact-match score (0 or 1) between normalized gold and predicted answers.

    Fixes: the obfuscated def repeated the parameter name `_snake_case`,
    which is a SyntaxError; callers use the name `compute_exact`.
    """
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )


lowerCAmelCase_ = compute_exact  # keep the obfuscated name bound for compatibility
def compute_fa(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers.

    Returns 1 when both are empty, 0 when they share no tokens.
    Fixes: duplicate `_snake_case` parameters (a SyntaxError) and lost local
    names; call sites use the (obfuscation-renamed) name `compute_fa`.
    """
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa


lowerCAmelCase_ = compute_fa  # keep the obfuscated name bound for compatibility
def get_raw_scores(dataset, preds):
    """Compute per-question exact-match and F1 scores.

    :param dataset: SQuAD-style dataset (articles -> paragraphs -> qas).
    :param preds: dict mapping question id -> predicted answer string.
    :return: (exact_scores, fa_scores), both dicts keyed by question id.

    Fixes: duplicate parameters, dict entries assigned to throwaway locals,
    and the per-gold-answer loop variable `a` not being passed to the
    scoring functions; callers use the name `get_raw_scores`.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores


lowerCAmelCase_ = get_raw_scores  # keep the obfuscated name bound for compatibility
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Re-score predictions under a no-answer probability threshold.

    When a question's no-answer probability exceeds the threshold it is
    treated as predicting "no answer": the score becomes 1.0 exactly when
    the question truly has no answer, else 0.0.

    Fixes: duplicate parameters and dict entries assigned to throwaway
    locals; callers use the name `apply_no_ans_threshold`.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores


lowerCAmelCase_ = apply_no_ans_threshold  # keep the obfuscated name bound for compatibility
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into {exact, f1, total} percentages.

    When `qid_list` is given, only those question ids are averaged.
    Fixes: duplicate parameters and `len()`/`sum()` called on placeholder
    names; callers use the name `make_eval_dict`.
    """
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )


lowerCAmelCase_ = make_eval_dict  # keep the obfuscated name bound for compatibility
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under `prefix`_key.

    Fixes: duplicate parameters and the destination key being lost to a
    throwaway local; callers use the name `merge_eval`.
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


lowerCAmelCase_ = merge_eval  # keep the obfuscated name bound for compatibility
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a precision-recall step plot to `out_image`.

    Relies on `plt` being imported at module level (done in the __main__
    block when --out-image-dir is set). Fixes: duplicate `_snake_case`
    parameters (a SyntaxError) and lost argument names; callers use the
    name `plot_pr_curve`.
    """
    plt.step(recalls , precisions , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(recalls , precisions , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()


lowerCAmelCase_ = plot_pr_curve  # keep the obfuscated name bound for compatibility
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer threshold and compute average precision.

    Questions are visited in increasing no-answer probability; each distinct
    probability value is a candidate threshold contributing one (P, R) point.

    :return: {"ap": average precision as a percentage}.

    Fixes: duplicate parameters, a lambda whose parameter did not match the
    key it used, and accumulator updates lost to throwaway locals; callers
    use the name `make_precision_recall_eval`.
    """
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}


lowerCAmelCase_ = make_precision_recall_eval  # keep the obfuscated name bound for compatibility
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, F1, oracle) and merge their APs into main_eval.

    Fixes: duplicate parameters, results of the three PR evaluations lost to
    throwaway locals, and `float(v)` mangled in the oracle-score dict;
    callers use the name `run_precision_recall_analysis`.
    """
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    # Oracle: score 1.0 exactly on answerable questions.
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(main_eval , pr_exact , "pr_exact" )
    merge_eval(main_eval , pr_fa , "pr_f1" )
    merge_eval(main_eval , pr_oracle , "pr_oracle" )


lowerCAmelCase_ = run_precision_recall_analysis  # keep the obfuscated name bound for compatibility
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids.

    No-op when `qid_list` is empty. Relies on module-level `plt` (imported in
    the __main__ block). Fixes: duplicate parameters and lost local names;
    callers use the name `histogram_na_prob`.
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of the dataset).
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()


lowerCAmelCase_ = histogram_na_prob  # keep the obfuscated name bound for compatibility
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the aggregate score.

    Starts from the score obtained by answering nothing (one point per truly
    unanswerable question) and sweeps thresholds in increasing no-answer
    probability.

    :return: (best score as a percentage of len(scores), best threshold).

    Fixes: duplicate parameters and best-score/threshold updates lost to
    throwaway locals; callers use the name `find_best_thresh`.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question: lose the free point if a
            # non-empty answer was predicted.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh


lowerCAmelCase_ = find_best_thresh  # keep the obfuscated name bound for compatibility
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best exact/F1 scores and thresholds into `main_eval`.

    Fixes: duplicate parameters and the four result entries assigned to
    throwaway locals instead of `main_eval`; callers use the name
    `find_all_best_thresh`.
    """
    best_exact, exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa, fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh


lowerCAmelCase_ = find_all_best_thresh  # keep the obfuscated name bound for compatibility
def main():
    """Run the full SQuAD 2.0 evaluation using the global OPTS.

    Reads the dataset and predictions (and optionally no-answer
    probabilities), computes thresholded exact/F1 metrics, and writes the
    result to OPTS.out_file or stdout.

    Fixes: nearly every local (dataset, preds, na_probs, the raw/thresholded
    score dicts, out_eval) was lost to throwaway `__magic_name__` names --
    the tuple unpack of get_raw_scores even bound both results to one name.
    Reconstructed per the official evaluation script; the __main__ block
    calls `main`.
    """
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
    dataset = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , "HasAns" )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )


lowerCAmelCase_ = main  # keep the obfuscated name bound for compatibility
if __name__ == "__main__":
    # Fix: the parsed options were assigned to a throwaway name, leaving the
    # module-level OPTS (read by main and the helpers) unset.
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        # Use a non-interactive backend so plots can be saved headlessly.
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 281 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case : Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _snake_case ( snake_case ):
    """CLIP-style image processor.

    Processing pipeline (each stage gated by its `do_*` flag): convert to
    RGB -> resize shortest edge -> center crop -> rescale -> normalize ->
    convert to the requested channel-dimension layout.

    NOTE(review): this file is machine-obfuscated; repeated `_a` parameters
    and `__magic_name__` assignments stand in for distinct names, so several
    signatures/bodies are not valid Python as written.
    """
    # The only model input this processor emits.
    UpperCamelCase__ = ['pixel_values']
    def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , _a = True , **_a , ):
        # Defaults mirror CLIP: shortest edge 224 for resize, 224x224 crop,
        # 1/255 rescale, OpenAI CLIP mean/std for normalization.
        super().__init__(**_a )
        __magic_name__ : int = size if size is not None else {"shortest_edge": 224}
        __magic_name__ : str = get_size_dict(_a , default_to_square=_a )
        __magic_name__ : str = crop_size if crop_size is not None else {"height": 224, "width": 224}
        __magic_name__ : Optional[Any] = get_size_dict(_a , default_to_square=_a , param_name="crop_size" )
        __magic_name__ : Dict = do_resize
        __magic_name__ : Any = size
        __magic_name__ : Any = resample
        __magic_name__ : Optional[Any] = do_center_crop
        __magic_name__ : Any = crop_size
        __magic_name__ : int = do_rescale
        __magic_name__ : Union[str, Any] = rescale_factor
        __magic_name__ : int = do_normalize
        # Fall back to the OpenAI CLIP normalization statistics.
        __magic_name__ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __magic_name__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
        __magic_name__ : Union[str, Any] = do_convert_rgb
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ):
        # resize: scale so the image's shortest edge equals size["shortest_edge"],
        # preserving aspect ratio.
        __magic_name__ : int = get_size_dict(_a , default_to_square=_a )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        __magic_name__ : str = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a )
        return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a = None , **_a , ):
        # center_crop: crop to exactly (height, width) taken from `size`.
        __magic_name__ : Optional[int] = get_size_dict(_a )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(_a , size=(size["height"], size["width"]) , data_format=_a , **_a )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a = None , **_a , ):
        # rescale: multiply pixel values by `scale` (typically 1/255).
        return rescale(_a , scale=_a , data_format=_a , **_a )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a = None , **_a , ):
        # normalize: standardize with per-channel mean/std.
        return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
        # preprocess: resolve per-call overrides against instance defaults,
        # validate, then apply the enabled pipeline stages in order.
        __magic_name__ : int = do_resize if do_resize is not None else self.do_resize
        __magic_name__ : str = size if size is not None else self.size
        __magic_name__ : List[str] = get_size_dict(_a , param_name="size" , default_to_square=_a )
        __magic_name__ : List[Any] = resample if resample is not None else self.resample
        __magic_name__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        __magic_name__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
        __magic_name__ : Any = get_size_dict(_a , param_name="crop_size" , default_to_square=_a )
        __magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __magic_name__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        __magic_name__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        __magic_name__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
        __magic_name__ : Optional[Any] = image_std if image_std is not None else self.image_std
        __magic_name__ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        __magic_name__ : Optional[Any] = make_list_of_images(_a )
        if not valid_images(_a ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Each enabled stage must have its parameters supplied.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __magic_name__ : Union[str, Any] = [convert_to_rgb(_a ) for image in images]
        # All transformations expect numpy arrays.
        __magic_name__ : Any = [to_numpy_array(_a ) for image in images]
        if do_resize:
            __magic_name__ : List[Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
        if do_center_crop:
            __magic_name__ : Dict = [self.center_crop(image=_a , size=_a ) for image in images]
        if do_rescale:
            __magic_name__ : int = [self.rescale(image=_a , scale=_a ) for image in images]
        if do_normalize:
            __magic_name__ : Dict = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
        # Reorder to the requested channel layout and wrap as a model batch.
        __magic_name__ : int = [to_channel_dimension_format(_a , _a ) for image in images]
        __magic_name__ : List[str] = {"pixel_values": images}
        return BatchFeature(data=_a , tensor_type=_a )
| 281 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece's word-boundary marker character.
snake_case : str = "▁"
# Path to the small SentencePiece model shipped as a test fixture.
snake_case : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( snake_case , unittest.TestCase ):
    """Tokenizer test suite for BigBird (slow + fast SentencePiece tokenizers).

    NOTE(review): machine-obfuscated — `_a` inside test bodies stands in for
    distinct local names and repeated `__magic_name__` assignments for
    distinct variables, so several tests are not runnable as written.
    """
    # Tokenizer classes under test and mixin feature switches.
    UpperCamelCase__ = BigBirdTokenizer
    UpperCamelCase__ = BigBirdTokenizerFast
    UpperCamelCase__ = True
    UpperCamelCase__ = True
    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: build a tokenizer from the fixture model and save it to tmpdir
        # so `get_tokenizer()` can reload it.
        super().setUp()
        __magic_name__ : Optional[Any] = self.tokenizer_class(_a , keep_accents=_a )
        tokenizer.save_pretrained(self.tmpdirname )
    def SCREAMING_SNAKE_CASE ( self ):
        # "<s>" should round-trip through token<->id conversion as id 1.
        __magic_name__ : Union[str, Any] = "<s>"
        __magic_name__ : Dict = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
    def SCREAMING_SNAKE_CASE ( self ):
        # Check first/last vocab entries and total vocab size (incl. specials).
        __magic_name__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(_a ) , 1_004 )
    def SCREAMING_SNAKE_CASE ( self ):
        # Base vocab (without added specials) is 1000 pieces.
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def SCREAMING_SNAKE_CASE ( self ):
        # Slow and fast tokenizers must agree on tokenization and encoding.
        if not self.test_rust_tokenizer:
            return
        __magic_name__ : Dict = self.get_tokenizer()
        __magic_name__ : str = self.get_rust_tokenizer()
        __magic_name__ : Any = "I was born in 92000, and this is falsé."
        __magic_name__ : Dict = tokenizer.tokenize(_a )
        __magic_name__ : Any = rust_tokenizer.tokenize(_a )
        self.assertListEqual(_a , _a )
        __magic_name__ : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
        __magic_name__ : List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a )
        self.assertListEqual(_a , _a )
        __magic_name__ : str = self.get_rust_tokenizer()
        __magic_name__ : Dict = tokenizer.encode(_a )
        __magic_name__ : Optional[int] = rust_tokenizer.encode(_a )
        self.assertListEqual(_a , _a )
    def SCREAMING_SNAKE_CASE ( self ):
        # Full tokenization test on the fixture model, incl. <unk> fallback.
        __magic_name__ : Optional[int] = BigBirdTokenizer(_a , keep_accents=_a )
        __magic_name__ : str = tokenizer.tokenize("This is a test" )
        self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , )
        __magic_name__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            _a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        __magic_name__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a )
        self.assertListEqual(
            _a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        __magic_name__ : int = tokenizer.convert_ids_to_tokens(_a )
        self.assertListEqual(
            _a , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
    @cached_property
    def SCREAMING_SNAKE_CASE ( self ):
        # Lazily-loaded full pretrained tokenizer for the slow/integration tests.
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Expected ids include the [CLS]=65 / [SEP]=66 wrappers.
        __magic_name__ : Any = "Hello World!"
        __magic_name__ : Dict = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Long input with punctuation and out-of-vocab words mapped to <unk>.
        __magic_name__ : Dict = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        __magic_name__ : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Smoke test: encoded batches must be accepted by a small BigBirdModel.
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        __magic_name__ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        __magic_name__ : List[Any] = " ".join(_a )
        __magic_name__ : Any = self.big_tokenizer.encode_plus(_a , return_tensors="pt" , return_token_type_ids=_a )
        __magic_name__ : Union[str, Any] = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_a )
        __magic_name__ : List[str] = BigBirdConfig(attention_type="original_full" )
        __magic_name__ : Optional[int] = BigBirdModel(_a )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**_a )
            model(**_a )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # decode() should keep special tokens in their canonical positions.
        __magic_name__ : int = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        __magic_name__ : int = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        # Pinned integration encoding against a fixed model revision.
        # fmt: off
        __magic_name__ : Optional[Any] = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 281 | 1 |
import string
def lowerCAmelCase_(sequence: str) -> str:
    """Encode/decode *sequence* with the Atbash cipher, character by character.

    Atbash maps A<->Z, B<->Y, ...: for an uppercase code point c the result is
    chr(155 - c) (155 = ord('A') + ord('Z')), for lowercase chr(219 - c).
    Non-letters are passed through unchanged. The cipher is its own inverse.

    BUGFIX: the obfuscated original referenced undefined names (`sequence`,
    `extract`, `output`) and called `ord()` on the whole input string.
    """
    output = ""
    for ch in sequence:
        code = ord(ch)
        if 65 <= code <= 90:  # uppercase A-Z
            output += chr(155 - code)
        elif 97 <= code <= 122:  # lowercase a-z
            output += chr(219 - code)
        else:
            output += ch
    return output
def lowerCAmelCase_(sequence: str) -> str:
    """Encode/decode *sequence* with the Atbash cipher via a lookup table.

    Builds the reversed alphabet (z..a + Z..A) once and maps each letter to
    the character at the same index; non-letters pass through unchanged.

    BUGFIX: the obfuscated original referenced undefined names (`letters`,
    `letters_reversed`, `sequence`) and indexed with the whole input string.
    """
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)
def lowerCAmelCase_ ( ) -> None:
    '''simple docstring'''
    # Benchmark the two Atbash implementations against each other with timeit.
    # NOTE(review): the setup string imports `atbash` / `atbash_slow` from
    # __main__, but no functions exist under those names in this obfuscated
    # module, and `_snake_case` is undefined here — running this raises.
    # Confirm the intended function names before use.
    from timeit import timeit
    print("Running performance benchmarks..." )
    __magic_name__ : Any = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=_snake_case )} seconds''' )
    print(F'''> atbash(): {timeit("atbash(printable)" , setup=_snake_case )} seconds''' )
if __name__ == "__main__":
    # Demo: print a few example encryptions, then run the benchmark.
    # NOTE(review): `atbash` and `benchmark` are not defined under these names
    # in this obfuscated module — this block raises NameError as written.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
| 281 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
snake_case : int = logging.get_logger(__name__)
# Name of the on-disk SentencePiece vocabulary file.
snake_case : List[str] = {"vocab_file": "spiece.model"}
# Download URLs for the pretrained ALBERT SentencePiece models.
snake_case : List[str] = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
snake_case : Tuple = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
# SentencePiece's word-boundary marker character.
snake_case : List[str] = "▁"
class _snake_case ( snake_case ):
    """SentencePiece-based ALBERT tokenizer.

    NOTE(review): machine-obfuscated — repeated `_a` parameters and
    `__magic_name__` assignments stand in for distinct names; several bodies
    reference pre-obfuscation identifiers and are not runnable as written.
    """
    # Vocabulary-file layout and pretrained-checkpoint metadata.
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __magic_name__ : str = (
            AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
            if isinstance(_a , _a )
            else mask_token
        )
        __magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        __magic_name__ : Dict = do_lower_case
        __magic_name__ : Tuple = remove_space
        __magic_name__ : Union[str, Any] = keep_accents
        __magic_name__ : Tuple = vocab_file
        # Load the SentencePiece model from disk.
        __magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # vocab_size: number of pieces in the SentencePiece model.
        return len(self.sp_model )
    def SCREAMING_SNAKE_CASE ( self ):
        # get_vocab: token -> id mapping, including added tokens.
        __magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it here and
        # rebuild it in __setstate__.
        __magic_name__ : List[str] = self.__dict__.copy()
        __magic_name__ : Any = None
        return state
    def __setstate__( self , _a ):
        __magic_name__ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __magic_name__ : str = {}
        __magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # preprocess_text: whitespace cleanup, quote normalization, optional
        # accent stripping (NFKD + drop combining marks) and lower-casing.
        if self.remove_space:
            __magic_name__ : List[Any] = " ".join(inputs.strip().split() )
        else:
            __magic_name__ : str = inputs
        __magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            __magic_name__ : str = unicodedata.normalize("NFKD" , _a )
            __magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
        if self.do_lower_case:
            __magic_name__ : int = outputs.lower()
        return outputs
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _tokenize: SentencePiece encode, re-splitting digit+comma pieces like
        # "9," (ALBERT convention).
        __magic_name__ : Optional[Any] = self.preprocess_text(_a )
        __magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
        __magic_name__ : Any = []
        for piece in pieces:
            if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                __magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        __magic_name__ : List[str] = cur_pieces[1:]
                    else:
                        __magic_name__ : Optional[int] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(_a )
            else:
                new_pieces.append(_a )
        return new_pieces
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # token -> id via the SentencePiece model.
        return self.sp_model.PieceToId(_a )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # id -> token via the SentencePiece model.
        return self.sp_model.IdToPiece(_a )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # convert_tokens_to_string: decode runs of ordinary pieces with the
        # SentencePiece model, keeping special tokens verbatim.
        __magic_name__ : Any = []
        __magic_name__ : Union[str, Any] = ""
        __magic_name__ : int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                __magic_name__ : List[Any] = True
                __magic_name__ : Optional[int] = []
            else:
                current_sub_tokens.append(_a )
                __magic_name__ : Optional[Any] = False
        out_string += self.sp_model.decode(_a )
        return out_string.strip()
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # build_inputs_with_special_tokens: [CLS] A [SEP] (+ B [SEP] for pairs).
        __magic_name__ : List[str] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        # get_special_tokens_mask: 1 marks special tokens, 0 sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is not None:
            return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1]
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # create_token_type_ids_from_sequences: 0s for segment A, 1s for B.
        __magic_name__ : Optional[int] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # save_vocabulary: copy the on-disk SentencePiece model into
        # `save_directory` (or re-serialize it if the source file is gone).
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : List[str] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , "wb" ) as fi:
                __magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
| 281 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring renames applied to original laion-CLAP checkpoint keys to map
# them onto the HF `ClapModel` parameter names.
snake_case : List[str] = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
# Feature extractor used by the conversion script.
# NOTE(review): instantiating at import time downloads the model config as a
# side effect.
snake_case : List[Any] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : Tuple=False ) -> Optional[Any]:
    '''simple docstring'''
    # Build the original laion-CLAP model (HTSAT-tiny audio + RoBERTa text)
    # from a checkpoint path, returning (model, model_cfg).
    # NOTE(review): obfuscation repeats the `_snake_case` parameter and the
    # body references `enable_fusion`, `model`, `model_cfg`, which no longer
    # exist under those names — not runnable as written.
    __magic_name__ , __magic_name__ : Tuple = create_model(
        "HTSAT-tiny" , "roberta" , _snake_case , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_snake_case , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def lowerCAmelCase_(state_dict):
    """Rename laion-CLAP checkpoint keys to the HF `ClapModel` layout.

    Steps, per key:
    1. apply the substring renames in ``KEYS_TO_MODIFY_MAPPING``;
    2. remap ``sequential.<i>.`` indices to ``layers.<i//3>.linear.`` (the
       original ``nn.Sequential`` interleaves Linear/activation/dropout);
    3. remap ``_projection.<i>.`` to ``_projection.linear{1,2}.``;
    4. split fused audio-branch ``qkv`` weights into separate query/key/value
       entries.

    BUGFIX: the original tested ``if "audio" and "qkv" in key`` — the literal
    ``"audio"`` is always truthy, so the "audio" substring check was ignored.
    It also referenced several undefined (obfuscated-away) names.

    Args:
        state_dict: mapping of original parameter names to tensors.
    Returns:
        dict with the renamed keys (tensors are not copied, except qkv splits).
    """
    # NOTE(review): relies on the module-level KEYS_TO_MODIFY_MAPPING constant.
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # Apply the simple substring renames first.
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        seq_match = re.match(sequential_layers_pattern, key)
        if seq_match:
            # Sequential indices step by 3 (Linear/ReLU/Dropout); HF keeps only
            # the Linear layers, hence the integer division by 3.
            sequential_layer = seq_match.group(1)
            key = key.replace(
                f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`: Linear layers sit at
            # indices 0 and 2, mapping to HF's linear1/linear2.
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(
                f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # Split the fused qkv tensor into query, key and value thirds.
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Any=False ) -> Dict:
    '''simple docstring'''
    # End-to-end conversion: load the original CLAP checkpoint, rename its
    # state dict to the HF layout, load it into a fresh `ClapModel`, and save
    # model + config to the dump folder.
    # NOTE(review): obfuscation repeats `_snake_case` in the signature and the
    # body references `init_clap`, `rename_state_dict`, `clap_model`,
    # `enable_fusion`, `transformers_config`, which no longer exist under
    # those names — not runnable as written.
    __magic_name__ , __magic_name__ : Any = init_clap(_snake_case , enable_fusion=_snake_case )
    clap_model.eval()
    __magic_name__ : Union[str, Any] = clap_model.state_dict()
    __magic_name__ : Tuple = rename_state_dict(_snake_case )
    __magic_name__ : List[str] = ClapConfig()
    __magic_name__ : Tuple = enable_fusion
    __magic_name__ : Any = ClapModel(_snake_case )
    # ignore the spectrogram embedding layer
    model.load_state_dict(_snake_case , strict=_snake_case )
    model.save_pretrained(_snake_case )
    transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/output/config paths and fusion
    # flag, then run the conversion.
    snake_case : List[Any] = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    # NOTE(review): `parser` / `convert_clap_checkpoint` / `args` are the
    # pre-obfuscation names and are undefined in this module as written.
    snake_case : List[str] = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 281 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowerCAmelCase_(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences to a fixed length.

    Builds a ``(batch, sequence_length)`` array filled with ``padding_value``
    (or ``(batch, sequence_length, 2)`` when ``padding_value`` is a tuple,
    e.g. for span pairs) and writes each truncated sequence into it on the
    requested side.

    BUGFIX: the obfuscated original declared four parameters all named
    `_snake_case` (a SyntaxError) and dropped the indexed-assignment targets,
    so nothing was ever written into the output tensor.

    Args:
        sequences: iterable of per-example sequences (lists/arrays).
        padding_value: scalar fill value, or a tuple for 2-wide entries.
        padding_side: "right" to left-align data, anything else right-aligns.
        sequence_length: fixed output length; longer inputs are truncated.
    Returns:
        Nested Python lists (via ``ndarray.tolist()``).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def lowerCAmelCase_(char: str) -> bool:
    """Return True if *char* (a single character) is punctuation.

    A character counts as punctuation when it falls in one of the ASCII
    punctuation ranges (!-/, :-@, [-`, {-~) — which treats characters like
    ``^`` and ``$`` as punctuation even though Unicode does not — or when its
    Unicode general category starts with "P".

    BUGFIX: the obfuscated original assigned to `__magic_name__` but then read
    the undefined names `cp` and `cat`, raising NameError.
    """
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class _snake_case ( snake_case ):
    """Data collator for LUKE-style entity NER batches.

    Pads token features with the tokenizer, then pads `labels`, `ner_tags`
    and `original_entity_spans` to the batch's entity-sequence length.
    """
    # Obfuscated dataclass fields — pre-obfuscation they were: tokenizer,
    # padding=True, max_length=None, pad_to_multiple_of=None,
    # label_pad_token_id=-100, return_tensors="pt".
    UpperCamelCase__ = 42
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    UpperCamelCase__ = None
    UpperCamelCase__ = -100
    UpperCamelCase__ = "pt"
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # torch_call: pad token features via the tokenizer, then pad the
        # label-like fields to the entity-sequence length by hand.
        import torch
        __magic_name__ : List[str] = "label" if "label" in features[0].keys() else "labels"
        __magic_name__ : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Delay tensor conversion until the label fields are padded too.
        __magic_name__ : Optional[int] = self.tokenizer.pad(
            _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        # Pad labels to the entity-id sequence length on the tokenizer's side.
        __magic_name__ : Dict = torch.tensor(batch["entity_ids"] ).shape[1]
        __magic_name__ : List[Any] = self.tokenizer.padding_side
        if padding_side == "right":
            __magic_name__ : str = [
                list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
            ]
        else:
            __magic_name__ : int = [
                [self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
            ]
        __magic_name__ : Dict = [feature["ner_tags"] for feature in features]
        __magic_name__ : List[Any] = padding_tensor(_a , -1 , _a , _a )
        __magic_name__ : Any = [feature["original_entity_spans"] for feature in features]
        # Spans are (start, end) pairs, hence the tuple padding value.
        __magic_name__ : Any = padding_tensor(_a , (-1, -1) , _a , _a )
        # NOTE(review): `torch.intaa` is obfuscation residue — originally
        # torch.int64; undefined as written.
        __magic_name__ : List[Any] = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 281 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
snake_case : List[Any] = logging.get_logger(__name__)
# Map of pretrained EfficientNet checkpoints to their config URLs.
snake_case : Optional[int] = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class _snake_case ( snake_case ):
    """Model configuration for EfficientNet (defaults match EfficientNet-b7).

    NOTE(review): machine-obfuscated — repeated `_a` parameters and
    `__magic_name__` assignments stand in for distinct names; the bodies
    reference the pre-obfuscation parameter names.
    """
    # Model-type string used by the auto classes.
    UpperCamelCase__ = 'efficientnet'
    def __init__( self , _a = 3 , _a = 600 , _a = 2.0 , _a = 3.1 , _a = 8 , _a = [3, 3, 5, 3, 5, 5, 3] , _a = [32, 16, 24, 40, 80, 112, 192] , _a = [16, 24, 40, 80, 112, 192, 320] , _a = [] , _a = [1, 2, 2, 2, 1, 2, 1] , _a = [1, 2, 2, 3, 3, 4, 1] , _a = [1, 6, 6, 6, 6, 6, 6] , _a = 0.25 , _a = "swish" , _a = 2_560 , _a = "mean" , _a = 0.02 , _a = 0.0_01 , _a = 0.99 , _a = 0.5 , _a = 0.2 , **_a , ):
        # NOTE(review): list literals as default arguments are shared across
        # calls (classic mutable-default pitfall).
        super().__init__(**_a )
        __magic_name__ : Optional[int] = num_channels
        __magic_name__ : int = image_size
        __magic_name__ : List[Any] = width_coefficient
        __magic_name__ : Dict = depth_coefficient
        __magic_name__ : Union[str, Any] = depth_divisor
        __magic_name__ : str = kernel_sizes
        __magic_name__ : List[Any] = in_channels
        __magic_name__ : Optional[Any] = out_channels
        __magic_name__ : Any = depthwise_padding
        __magic_name__ : Dict = strides
        __magic_name__ : Dict = num_block_repeats
        __magic_name__ : Optional[int] = expand_ratios
        __magic_name__ : int = squeeze_expansion_ratio
        __magic_name__ : Tuple = hidden_act
        __magic_name__ : str = hidden_dim
        __magic_name__ : Dict = pooling_type
        __magic_name__ : Any = initializer_range
        __magic_name__ : Tuple = batch_norm_eps
        __magic_name__ : List[str] = batch_norm_momentum
        __magic_name__ : Any = dropout_rate
        __magic_name__ : List[Any] = drop_connect_rate
        # Each block repeat contributes 4 hidden states.
        __magic_name__ : Any = sum(_a ) * 4
class _snake_case ( snake_case ):
    """ONNX export configuration for EfficientNet."""

    # Minimum torch version required for a correct ONNX export.
    UpperCamelCase__ = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # Dynamic-axis names for the single `pixel_values` input (NCHW layout).
        axis_names = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict(pixel_values=axis_names)

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-5
| 281 |
import math
def lowerCAmelCase_(x: float, a: float) -> float:
    """Evaluate f(x) = x**2 - a, whose positive root is sqrt(a).

    BUGFIX: the obfuscated original declared both parameters as `_snake_case`
    (a SyntaxError) and referenced the undefined name `a` in the body.
    """
    return math.pow(x, 2) - a
def lowerCAmelCase_(x: float) -> float:
    """Evaluate f'(x) = 2*x, the derivative of x**2 - a with respect to x.

    BUGFIX: the obfuscated original's body referenced the undefined name `x`.
    """
    return 2 * x
def lowerCAmelCase_(a: float) -> float:
    """Return a starting guess >= sqrt(a) for Newton's method.

    Repeatedly squares 2.0 until the value exceeds `a`, giving a point above
    the root so the Newton iteration converges monotonically from above.

    BUGFIX: the obfuscated original referenced the undefined names `start` and
    `a`, and its loop body never updated the loop variable (infinite loop).
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def lowerCAmelCase_(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method.

    Iterates x_{n+1} = x_n - f(x_n)/f'(x_n) for f(x) = x**2 - a, starting
    from a power of two >= a, until successive iterates differ by less than
    `tolerance` or `max_iter` iterations have run.

    BUGFIX: the obfuscated original declared duplicate `_snake_case`
    parameters (a SyntaxError) and called `get_initial_point`, `fx` and
    `fx_derivative`, none of which exist under those names in this module;
    the equivalent logic is now self-contained as local helpers.

    Args:
        a: non-negative number whose square root is sought.
        max_iter: iteration cap.
        tolerance: convergence threshold on |x_{n+1} - x_n|.
    Raises:
        ValueError: if `a` is negative.
    """
    if a < 0:
        raise ValueError("math domain error")

    def _fx(x: float) -> float:
        # f(x) = x^2 - a; its positive root is sqrt(a).
        return math.pow(x, 2) - a

    def _fx_derivative(x: float) -> float:
        return 2 * x

    # Initial guess: square 2.0 until it exceeds `a` (converge from above).
    value = 2.0
    while value <= a:
        value = math.pow(value, 2)

    for _ in range(max_iter):
        prev_value = value
        value = value - _fx(value) / _fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 281 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _snake_case ( tf.keras.layers.Layer ):
    """In-graph GPT-2 tokenizer backed by keras-nlp's BytePairTokenizer.

    Tokenizes TF string tensors inside the graph and, when `pad_token_id`
    and a max length are set, pads the ids and builds the attention mask.

    NOTE(review): machine-obfuscated — repeated `_a` parameters and
    `__magic_name__` assignments stand in for distinct names; bodies
    reference the pre-obfuscation identifiers.
    """
    def __init__( self , _a , _a , _a = None , _a = None ):
        # Pre-obfuscation signature: (vocab, merges, max_length=None,
        # pad_token_id=None).
        super().__init__()
        __magic_name__ : Tuple = pad_token_id
        __magic_name__ : Union[str, Any] = max_length
        __magic_name__ : Dict = vocab
        __magic_name__ : List[Any] = merges
        __magic_name__ : Any = BytePairTokenizer(_a , _a , sequence_length=_a )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , _a , *_a , **_a ):
        # from_tokenizer: build from an existing (slow) GPT2Tokenizer's
        # vocab and BPE merge ranks.
        __magic_name__ : Optional[int] = [" ".join(_a ) for m in tokenizer.bpe_ranks.keys()]
        __magic_name__ : List[str] = tokenizer.get_vocab()
        return cls(_a , _a , *_a , **_a )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , _a , *_a , **_a ):
        # from_pretrained: load the slow tokenizer from the Hub, then wrap it.
        __magic_name__ : Any = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
        return cls.from_tokenizer(_a , *_a , **_a )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls , _a ):
        # from_config: Keras deserialization hook.
        return cls(**_a )
    def SCREAMING_SNAKE_CASE ( self ):
        # get_config: Keras serialization hook.
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # call: tokenize, then optionally pad to max_length and emit the
        # matching attention mask.
        __magic_name__ : int = self.tf_tokenizer(_a )
        __magic_name__ : List[str] = tf.ones_like(_a )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            __magic_name__ : Any = max_length if max_length is not None else self.max_length
            if max_length is not None:
                __magic_name__ , __magic_name__ : List[str] = pad_model_inputs(
                    _a , max_seq_length=_a , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 281 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _snake_case :
    # Test helper that builds a tiny LED configuration plus matching dummy inputs.
    # NOTE(review): throughout this class, assignment targets were replaced by the
    # placeholder `__magic_name__` while later statements still read the intended
    # names (`parent`, `input_ids`, `config`, `outputs`, ...). As written those
    # reads raise NameError — restore the real targets before executing.
    UpperCamelCase__ = LEDConfig
    UpperCamelCase__ = {}
    UpperCamelCase__ = 'gelu'
    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ):
        # Stores the tiny-model hyper-parameters on the tester instance.
        # NOTE(review): every parameter is named `_a` (duplicate parameter names are
        # a SyntaxError) — the RHS names below are the intended parameter names.
        __magic_name__ : int = parent
        __magic_name__ : Optional[int] = batch_size
        __magic_name__ : Tuple = seq_length
        __magic_name__ : List[Any] = is_training
        __magic_name__ : Dict = use_labels
        __magic_name__ : Optional[Any] = vocab_size
        __magic_name__ : int = hidden_size
        __magic_name__ : Optional[int] = num_hidden_layers
        __magic_name__ : Optional[int] = num_attention_heads
        __magic_name__ : Tuple = intermediate_size
        __magic_name__ : Any = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[str] = max_position_embeddings
        __magic_name__ : Any = eos_token_id
        __magic_name__ : str = pad_token_id
        __magic_name__ : int = bos_token_id
        __magic_name__ : Optional[int] = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        __magic_name__ : Tuple = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        __magic_name__ : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def SCREAMING_SNAKE_CASE ( self ):
        # Builds (config, inputs_dict) for the common tests: random encoder input ids
        # terminated by EOS, random decoder ids, plus a global-attention mask that
        # marks only the last position of each sequence as global.
        __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __magic_name__ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __magic_name__ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : Dict = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        __magic_name__ : List[str] = prepare_led_inputs_dict(_a , _a , _a )
        __magic_name__ : Union[str, Any] = tf.concat(
            [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
        __magic_name__ : List[Any] = global_attention_mask
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        # Verifies that the decoder produces identical logits with and without a
        # past-key-values cache: runs one forward pass, appends new tokens, and
        # compares a random slice of the cached vs. uncached outputs.
        __magic_name__ : Dict = TFLEDModel(config=_a ).get_decoder()
        __magic_name__ : Optional[int] = inputs_dict["input_ids"]
        __magic_name__ : Union[str, Any] = input_ids[:1, :]
        __magic_name__ : str = inputs_dict["attention_mask"][:1, :]
        __magic_name__ : int = 1
        # first forward pass
        __magic_name__ : Tuple = model(_a , attention_mask=_a , use_cache=_a )
        __magic_name__ , __magic_name__ : str = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __magic_name__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __magic_name__ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __magic_name__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        __magic_name__ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __magic_name__ : List[str] = model(_a , attention_mask=_a )[0]
        __magic_name__ : Dict = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __magic_name__ : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __magic_name__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
        __magic_name__ : List[str] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def lowerCAmelCase_ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ) -> dict:
    """Build the LED model's input dict, deriving any mask/head-mask not supplied.

    Fixes the mangled original: all seven parameters were named `_snake_case`
    (duplicate parameter names are a SyntaxError) and the derived values were
    assigned to placeholder targets — the names restored here are exactly the
    ones the body already reads.
    """
    if attention_mask is None:
        # Mask out pad positions in the encoder input.
        # NOTE(review): `tf.inta` is not a TensorFlow dtype — presumably a mangled
        # tf.int8/int32; confirm against the original file before running.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # The first decoder position is always attended (the decoder start token
        # may equal the pad token), so force a one there.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        # Keep every encoder attention head active by default.
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    # Common-test harness for the TF LED models.
    # NOTE(review): as in the tester class above, assignment targets are the
    # mangled placeholder `__magic_name__` while later statements read the
    # intended names (`inputs_dict`, `model`, `config`, `out_len`, ...) —
    # restore the real targets before executing.
    UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    UpperCamelCase__ = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase__ = True
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: create the model tester and a generic config tester.
        __magic_name__ : Dict = TFLEDModelTester(self )
        __magic_name__ : List[Any] = ConfigTester(self , config_class=_a )
    def SCREAMING_SNAKE_CASE ( self ):
        # Run the shared LEDConfig sanity checks.
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self ):
        # Exercise the decoder's past-key-values cache path via the tester above.
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )
    def SCREAMING_SNAKE_CASE ( self ):
        # Checks the shapes/counts of local and global attention outputs under
        # every combination of output_attentions / output_hidden_states flags.
        __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : List[str] = tf.zeros_like(inputs_dict["attention_mask"] )
        __magic_name__ : Optional[Any] = 2
        # Mark the first `num_global_attn_indices` positions as global attention.
        __magic_name__ : Tuple = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        __magic_name__ : Any = True
        __magic_name__ : str = self.model_tester.seq_length
        __magic_name__ : Dict = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(_a ):
            # Decoder self-attention is dense: [heads, seq_length, seq_length].
            __magic_name__ : str = outputs.decoder_attentions
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(_a ):
            # Encoder emits both local attentions and separate global attentions.
            __magic_name__ : Any = [t.numpy() for t in outputs.encoder_attentions]
            __magic_name__ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = False
            __magic_name__ : Tuple = False
            __magic_name__ : Optional[int] = model_class(_a )
            __magic_name__ : str = model(self._prepare_for_class(_a , _a ) )
            __magic_name__ : Any = len(_a )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
            if self.is_encoder_decoder:
                __magic_name__ : Tuple = model_class(_a )
                __magic_name__ : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
                self.assertEqual(config.output_hidden_states , _a )
                check_decoder_attentions_output(_a )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __magic_name__ : Dict = True
            __magic_name__ : str = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
            # Check attention is always last and order is fine
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
            self.assertEqual(model.config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def SCREAMING_SNAKE_CASE ( self ):
        pass
    def SCREAMING_SNAKE_CASE ( self ):
        # TODO: Head-masking not yet implement
        pass
def lowerCAmelCase_ ( _snake_case : int ) -> Optional[int]:
    """Wrap nested Python ints in a `tf.intaa` constant tensor (integration-test helper)."""
    tensor = tf.constant(_snake_case , dtype=tf.intaa )
    return tensor
snake_case : Optional[int] = 1E-4  # module-level numeric tolerance; NOTE(review): the tests below pass their own atol/rtol literals instead of this constant
@slow
@require_tf
class _snake_case ( unittest.TestCase ):
    # Slow integration checks against the pretrained "allenai/led-base-16384"
    # checkpoint: compare a slice of the real model's outputs to hard-coded
    # reference values. NOTE(review): assignment targets are mangled here too —
    # `model`, `output`, `_a` reads rely on the original (lost) variable names.
    def SCREAMING_SNAKE_CASE ( self ):
        # Hidden-state check on the bare LED encoder-decoder (`.led`).
        __magic_name__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        __magic_name__ : Optional[int] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Any = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : List[Any] = model(**_a )[0]
        __magic_name__ : List[str] = (1, 1_024, 768)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : int = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 )
    def SCREAMING_SNAKE_CASE ( self ):
        # Logits check on the full conditional-generation head (vocab-sized output).
        __magic_name__ : Tuple = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        __magic_name__ : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Optional[Any] = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : Union[str, Any] = model(**_a )[0]
        __magic_name__ : Optional[int] = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : str = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
| 281 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ):
    """Download the demo "merlion" image used to sanity-check the converted model.

    Fixes the mangled original, which passed an undefined name to `requests.get`
    and returned an undefined `image` variable. (The original `-> str` annotation
    was wrong — this returns a PIL RGB image.)
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    # stream=True lets PIL read straight from the response's raw file object.
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def lowerCAmelCase_ ( config ) -> list:
    """Return (old, new) pairs mapping LAVIS BLIP-2 weight names to HF names.

    Fixes the mangled original: the result list and the `config` parameter were
    never bound (`rename_keys` and `config` were read but undefined).
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
    for i in range(config.vision_config.num_hidden_layers ):
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
        rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
        rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
    # fmt: on
    return rename_keys
def lowerCAmelCase_ ( dct , old , new ) -> None:
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes the mangled original: all three parameters shared the name `_snake_case`
    (a SyntaxError), and the popped value was never stored under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def lowerCAmelCase_ ( state_dict , config ) -> None:
    """Fuse BLIP-2's separate q/v attention biases into one qkv bias, in place.

    The HF model stores a single qkv bias; LAVIS has no k bias, so zeros are
    inserted for k. Fixes the mangled original, in which the fused tensor was
    never written back into the state dict.
    """
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict: layout is [q, zeros-for-k, v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        # NOTE(review): target key reconstructed to match the HF qkv naming used by
        # create_rename_keys above (`...self_attn.qkv...` on the renamed side maps
        # from `...attn.qkv...`) — confirm against the original conversion script.
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def lowerCAmelCase_ ( model_name , eos_token_id=None ):
    """Build a BlipaConfig for ``model_name``; returns ``(config, image_size)``.

    Fixes the mangled original: both parameters were named `_snake_case`
    (a SyntaxError) and the vision/text sub-configs were never bound before use.
    """
    # COCO-finetuned checkpoints were trained at the larger 364px resolution.
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    # NOTE(review): if model_name matches none of these branches, text_config is
    # unbound and the constructor below raises NameError — same as the original.
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : str=None , _snake_case : Dict=False ) -> List[Any]:
    '''Convert a LAVIS BLIP-2 checkpoint to the HF format, verify its outputs
    against the original model, and optionally save/push the result.

    NOTE(review): this body is machine-mangled — the three parameters share one
    name (a SyntaxError) and assignment targets are the placeholder
    `__magic_name__`, while later statements read the intended names
    (`model_name`, `tokenizer`, `state_dict`, `hf_model`, ...). Restore the real
    names (model_name, pytorch_dump_folder_path, push_to_hub) before executing.
    '''
    # Pick the matching text tokenizer for the language backbone (OPT vs flan-T5).
    __magic_name__ : Optional[int] = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    __magic_name__ : List[Any] = tokenizer("\n" , add_special_tokens=_snake_case ).input_ids[0]
    __magic_name__ , __magic_name__ : Tuple = get_blipa_config(_snake_case , eos_token_id=_snake_case )
    __magic_name__ : Union[str, Any] = BlipaForConditionalGeneration(_snake_case ).eval()
    # Map from HF checkpoint name to the (LAVIS model name, model type) pair.
    __magic_name__ : Any = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    __magic_name__ , __magic_name__ : Union[str, Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    __magic_name__ : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
    __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = load_model_and_preprocess(
        name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    __magic_name__ : Dict = original_model.state_dict()
    __magic_name__ : str = create_rename_keys(_snake_case )
    for src, dest in rename_keys:
        rename_key(_snake_case , _snake_case , _snake_case )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        __magic_name__ : Any = state_dict.pop(_snake_case )
        if key.startswith("Qformer.bert" ):
            __magic_name__ : Optional[int] = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            __magic_name__ : Any = key.replace("self" , "attention" )
        if "opt_proj" in key:
            __magic_name__ : Union[str, Any] = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            __magic_name__ : Optional[int] = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            __magic_name__ : List[str] = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            __magic_name__ : Tuple = key.replace("t5" , "language" )
        __magic_name__ : Dict = val
    # read in qv biases
    read_in_q_v_bias(_snake_case , _snake_case )
    # Non-strict load: the only acceptable mismatch is the qformer position_ids buffer.
    __magic_name__ , __magic_name__ : Tuple = hf_model.load_state_dict(_snake_case , strict=_snake_case )
    assert len(_snake_case ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    __magic_name__ : List[Any] = load_demo_image()
    __magic_name__ : Tuple = vis_processors["eval"](_snake_case ).unsqueeze(0 ).to(_snake_case )
    __magic_name__ : Dict = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(_snake_case )
    # create processor
    __magic_name__ : Optional[Any] = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_snake_case , image_std=_snake_case )
    __magic_name__ : Dict = BlipaProcessor(image_processor=_snake_case , tokenizer=_snake_case )
    __magic_name__ : Union[str, Any] = processor(images=_snake_case , return_tensors="pt" ).pixel_values.to(_snake_case )
    # make sure processor creates exact same pixel values
    assert torch.allclose(_snake_case , _snake_case )
    original_model.to(_snake_case )
    hf_model.to(_snake_case )
    # Compare forward-pass logits of the original and converted models.
    with torch.no_grad():
        if "opt" in model_name:
            __magic_name__ : List[Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            __magic_name__ : Optional[int] = hf_model(_snake_case , _snake_case ).logits
        else:
            __magic_name__ : int = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            __magic_name__ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            __magic_name__ : List[str] = hf_model(_snake_case , _snake_case , labels=_snake_case ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        __magic_name__ : List[str] = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case )
        assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        __magic_name__ : Tuple = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case )
    else:
        # cast to same type
        __magic_name__ : str = logits.dtype
        assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    __magic_name__ : Optional[int] = ""
    __magic_name__ : Dict = tokenizer(_snake_case , return_tensors="pt" ).input_ids.to(_snake_case )
    __magic_name__ : int = original_model.generate({"image": original_pixel_values} )
    __magic_name__ : Optional[Any] = hf_model.generate(
        _snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , _snake_case )
    # Strip the prompt tokens before decoding the HF generation.
    __magic_name__ : Tuple = input_ids.shape[1]
    __magic_name__ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case )
    __magic_name__ : Union[str, Any] = [text.strip() for text in output_text]
    print("HF generation:" , _snake_case )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_snake_case )
        hf_model.save_pretrained(_snake_case )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the BLIP-2 conversion.
    # NOTE(review): the assignments below target the placeholder `snake_case`
    # while later lines read `parser`, `choices` and `args` — restore the real
    # targets before running. The final call also names `convert_blipa_checkpoint`,
    # which does not match the (mangled) function name defined above.
    snake_case : Any = argparse.ArgumentParser()
    snake_case : Union[str, Any] = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    snake_case : int = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Optional[Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( config , base_model=False ) -> list:
    """Return (old, new) pairs mapping timm ViT-hybrid weight names to HF names.

    Fixes the mangled original: both parameters were named `_snake_case`
    (a SyntaxError) and the result list / `config` / `base_model` names were
    read but never bound.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on
    return rename_keys
def lowerCAmelCase_ ( state_dict , config , base_model=False ) -> None:
    """Split timm's fused qkv projection into separate HF query/key/value tensors, in place.

    Fixes the mangled original: duplicate `_snake_case` parameter names
    (a SyntaxError), and the split slices were computed but never written back
    into the state dict (placeholder assignment targets).
    NOTE(review): the HF target key names below are reconstructed from the
    standard ViT layout (`{prefix}encoder.layer.{i}.attention.attention.*`) —
    confirm against the original conversion script.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( state_dict ) -> None:
    """Drop the timm classification-head weights (absent in the base model), in place.

    Fixes the mangled original, which called ``state_dict.pop(_snake_case, _snake_case)``
    — popping the dict itself instead of each key ``k``.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # pop with a default so already-missing keys are ignored
        state_dict.pop(k , None )
def lowerCAmelCase_ ( dct , old , new ) -> None:
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes the mangled original: all three parameters shared the name `_snake_case`
    (a SyntaxError), and the popped value was never stored under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def lowerCAmelCase_ ( ):
    """Download the COCO cats test image used to sanity-check the converted model.

    Fixes the mangled original, which passed an undefined name to `requests.get`
    instead of the URL and `stream=True`.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the response's raw file object.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Any , _snake_case : int=False ) -> Dict:
    """Convert a timm ViT-hybrid checkpoint to the HuggingFace format and verify it.

    NOTE(review): this dump is broken as written — the three parameters all share
    the name ``_snake_case`` (a SyntaxError) and most results are bound to the
    throwaway local ``__magic_name__`` while later lines read the original names
    (``config``, ``model``, ``processor``, ``vit_name``...). Presumably the
    parameters were (vit_name, pytorch_dump_folder_path, push_to_hub) — verify
    against the original conversion script before running.
    """
    # backbone config: BiT (ResNet) stem feeding the ViT encoder via stage3 features
    __magic_name__ : List[str] = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_snake_case , )
    __magic_name__ : List[str] = ViTHybridConfig(backbone_config=_snake_case , image_size=384 , num_labels=1000 )
    __magic_name__ : str = False
    # load original model from timm
    __magic_name__ : Union[str, Any] = timm.create_model(_snake_case , pretrained=_snake_case )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    __magic_name__ : List[Any] = timm_model.state_dict()
    if base_model:
        remove_classification_head_(_snake_case )
    __magic_name__ : Tuple = create_rename_keys(_snake_case , _snake_case )
    for src, dest in rename_keys:
        rename_key(_snake_case , _snake_case , _snake_case )
    read_in_q_k_v(_snake_case , _snake_case , _snake_case )
    # fetch the ImageNet-1k id->label mapping for the classification head
    __magic_name__ : List[str] = "huggingface/label-files"
    __magic_name__ : int = "imagenet-1k-id2label.json"
    __magic_name__ : Optional[int] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) )
    __magic_name__ : int = {int(_snake_case ): v for k, v in idalabel.items()}
    __magic_name__ : List[str] = idalabel
    __magic_name__ : List[str] = {v: k for k, v in idalabel.items()}
    # load HuggingFace model (in21k checkpoints are headless)
    if vit_name[-5:] == "in21k":
        __magic_name__ : List[str] = ViTHybridModel(_snake_case ).eval()
    else:
        __magic_name__ : str = ViTHybridForImageClassification(_snake_case ).eval()
    model.load_state_dict(_snake_case )
    # create an image processor mirroring the timm eval transform pipeline
    __magic_name__ : List[Any] = create_transform(**resolve_data_config({} , model=_snake_case ) )
    __magic_name__ : int = transform.transforms
    __magic_name__ : List[str] = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    __magic_name__ : int = ViTHybridImageProcessor(
        do_resize=_snake_case , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_snake_case , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    __magic_name__ : List[Any] = prepare_img()
    __magic_name__ : Any = transform(_snake_case ).unsqueeze(0 )
    __magic_name__ : Tuple = processor(_snake_case , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(_snake_case , _snake_case )
    # verify logits
    with torch.no_grad():
        __magic_name__ : Optional[int] = model(_snake_case )
    __magic_name__ : List[str] = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        # headless model: compare pooled features against timm's forward_features
        __magic_name__ : List[str] = timm_model.forward_features(_snake_case )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(_snake_case , outputs.pooler_output , atol=1E-3 )
    else:
        __magic_name__ : Any = timm_model(_snake_case )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(_snake_case ).mkdir(exist_ok=_snake_case )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(_snake_case )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(_snake_case )
    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    # CLI entry point for the timm -> HuggingFace ViT-hybrid conversion
    snake_case : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    # NOTE(review): the parser is bound to `snake_case` but read as `parser`/`args`,
    # and the function above is defined as `lowerCAmelCase_` yet called as
    # `convert_vit_checkpoint` — renaming-pass breakage; restore names before running.
    snake_case : List[Any] = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# module-level logger for this tokenizer file
snake_case : Dict = logging.get_logger(__name__)
# canonical on-disk names of the two vocabulary artifacts (BPE vocab + merge codes)
snake_case : Union[str, Any] = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
# hub URLs of the pretrained vocabulary files, keyed by checkpoint name
snake_case : Dict = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
# maximum sequence length supported by each checkpoint's position embeddings
snake_case : Union[str, Any] = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( _snake_case ):
    """Return the set of adjacent symbol pairs in a word.

    ``_snake_case`` is a word represented as a sequence of symbols (string or
    tuple of string pieces); the resulting pairs drive the BPE merge loop.

    Note: the original body bound the set to a throwaway local and then read
    undefined names (``pairs``, ``word``); this restores the intended logic and
    additionally returns an empty set for an empty word instead of IndexError.
    """
    if not _snake_case:
        return set()
    pairs = set()
    prev_char = _snake_case[0]
    for char in _snake_case[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _snake_case ( snake_case ):
    """PhoBERT-style BPE tokenizer (word-level BPE with ``</w>`` end markers).

    NOTE(review): in this dump most assignment targets were rewritten to the
    throwaway name ``__magic_name__`` while later code reads the original names
    (``self.encoder``, ``merges``, ``word``...), and ``__init__`` repeats the
    parameter name ``_a`` (a SyntaxError). Treat every ``__magic_name__``
    binding as standing for the attribute/local consumed just below — verify
    against the original ``transformers`` PhoBERT tokenizer before executing.
    """

    # class-level configuration consumed by the PreTrainedTokenizer machinery
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
        # expected original parameters: vocab_file, merges_file, bos/eos/sep/cls/unk/pad/mask tokens
        super().__init__(
            bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
        __magic_name__ : Dict = vocab_file
        __magic_name__ : Tuple = merges_file
        __magic_name__ : List[Any] = {}
        # special-token ids: <s>=0, <pad>=1, </s>=2, <unk>=3
        __magic_name__ : List[Any] = 0
        __magic_name__ : Tuple = 1
        __magic_name__ : int = 2
        __magic_name__ : Union[str, Any] = 3
        self.add_from_file(_a )
        # reverse mapping id -> token
        __magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
        with open(_a , encoding="utf-8" ) as merges_handle:
            __magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
        # drop the per-merge frequency column, keep only the symbol pair
        __magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        # merge pair -> rank (lower rank merges first)
        __magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
        # cache of already-BPE-encoded tokens
        __magic_name__ : Optional[int] = {}

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # build_inputs_with_special_tokens: <s> A </s> or <s> A </s></s> B </s>
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ : Optional[Any] = [self.cls_token_id]
        __magic_name__ : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        # get_special_tokens_mask: 1 for special tokens, 0 for sequence tokens
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is None:
            return [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # create_token_type_ids_from_sequences: PhoBERT does not use token types -> all zeros
        __magic_name__ : Optional[Any] = [self.sep_token_id]
        __magic_name__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # vocab_size
        return len(self.encoder )

    def SCREAMING_SNAKE_CASE ( self ):
        # get_vocab: base vocabulary merged with user-added tokens
        return dict(self.encoder , **self.added_tokens_encoder )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # bpe: greedily apply the lowest-rank merge until no known merge remains
        if token in self.cache:
            return self.cache[token]
        __magic_name__ : List[Any] = tuple(_a )
        # mark the end of the word so end-of-word merges are distinguishable
        __magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        __magic_name__ : Any = get_pairs(_a )
        if not pairs:
            return token
        while True:
            # best (lowest-rank) candidate merge present in this word
            __magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __magic_name__ , __magic_name__ : List[str] = bigram
            __magic_name__ : List[str] = []
            __magic_name__ : List[str] = 0
            while i < len(_a ):
                try:
                    __magic_name__ : Any = word.index(_a , _a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __magic_name__ : Tuple = j
                if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __magic_name__ : Union[str, Any] = tuple(_a )
            __magic_name__ : Optional[int] = new_word
            if len(_a ) == 1:
                break
            else:
                __magic_name__ : List[Any] = get_pairs(_a )
        # join sub-words with the continuation marker and strip the trailing "</w>"
        __magic_name__ : Optional[int] = "@@ ".join(_a )
        __magic_name__ : Tuple = word[:-4]
        __magic_name__ : str = word
        return word

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _tokenize: whitespace-split, then BPE each chunk
        __magic_name__ : Optional[Any] = []
        __magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
        for token in words:
            split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
        return split_tokens

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _convert_token_to_id (falls back to the <unk> id)
        return self.encoder.get(_a , self.encoder.get(self.unk_token ) )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _convert_id_to_token
        return self.decoder.get(_a , self.unk_token )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # convert_tokens_to_string: undo the "@@ " continuation markers
        __magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
        return out_string

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # save_vocabulary: copy vocab + merges files into save_directory
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : Optional[int] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __magic_name__ : Union[str, Any] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        # only copy when source and destination differ
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
            copyfile(self.merges_file , _a )
        return out_vocab_file, out_merge_file

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # add_from_file: populate self.encoder from a fairseq-style "<token> <count>" dictionary,
        # accepting either a path (str) or an open file object
        if isinstance(_a , _a ):
            try:
                with open(_a , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(_a )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        __magic_name__ : List[Any] = f.readlines()
        for lineTmp in lines:
            __magic_name__ : Optional[Any] = lineTmp.strip()
            # the count is everything after the last space; the token is everything before
            __magic_name__ : Union[str, Any] = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            __magic_name__ : Optional[int] = line[:idx]
            __magic_name__ : Dict = len(self.encoder )
| 281 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
# source checkpoint whose vocab/merges are reused for the tiny model
snake_case : List[str] = "facebook/wmt19-en-de"
snake_case : Dict = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : List[str] = FSMTConfig.from_pretrained(mname)
# shrink every architectural dimension to the minimum that still runs
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
# NOTE(review): assignments go to `snake_case` but later lines read `mname`,
# `config`, `tiny_model`, `batch`, `outputs` — renaming-pass breakage; restore
# the original names before executing.
snake_case : int = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
snake_case : Optional[Any] = tokenizer(["Making tiny model"], return_tensors="pt")
snake_case : List[str] = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
snake_case : Dict = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 281 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _snake_case ( snake_case ):
    """Minimal iterable dataset that replays a fixed payload, for dataloader tests.

    Note: the original ``__init__`` bound the payload to a throwaway local (and
    read an undefined name ``data``), so ``__iter__`` would raise
    AttributeError; the payload is now stored on the instance.
    """

    def __init__( self , _a ):
        # keep the payload so __iter__ can replay it
        self.data = _a

    def __iter__( self ):
        for element in self.data:
            yield element
def lowerCAmelCase_ ( _snake_case=True ):
    """Build an :class:`Accelerator` and assert the expected two-process world.

    Args:
        _snake_case: forwarded as ``even_batches``.

    Note: the original bound the Accelerator to a throwaway local and then read
    the undefined name ``accelerator``; it is now bound once so the assert and
    the return value see the same object.
    """
    accelerator = Accelerator(even_batches=_snake_case)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def lowerCAmelCase_ ( accelerator , dataset_size , batch_size , iterable=False ):
    """Create and ``accelerator.prepare`` a dataloader over ``range(dataset_size)``.

    Args:
        accelerator: object exposing ``prepare(dataloader)``.
        dataset_size: number of integer samples.
        batch_size: dataloader batch size.
        iterable: when True wrap the data in the iterable-style dummy dataset
            instead of a map-style ``TensorDataset``.

    Returns the prepared dataloader.

    Note: the original signature repeated ``_snake_case`` four times (a
    SyntaxError) and the body read undefined names; distinct positional names
    restore the intended contract.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    # let the accelerator shard/pad the loader for the current process
    dl = accelerator.prepare(dl)
    return dl
def lowerCAmelCase_ ( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    """Assert that each process observes the expected per-batch sizes.

    Builds a prepared dataloader and compares the realized batch sizes against
    the expectation for process 0 or process 1.

    Note: the original signature repeated ``_snake_case`` five times (a
    SyntaxError); distinct positional names restore the intended contract.
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def lowerCAmelCase_ ( ) -> List[str]:
    """Check that the default even_batches=True pads so all processes see equal batches."""
    __magic_name__ : str = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        _snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        _snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
    """Check that even_batches=False leaves trailing batches unpadded/uneven across processes."""
    __magic_name__ : Union[str, Any] = create_accelerator(even_batches=_snake_case )
    # 3 samples over 2 processes: process 1 gets one fewer batch
    verify_dataloader_batch_sizes(
        _snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    # 7 samples, batch size 2: process 1's final batch is short
    verify_dataloader_batch_sizes(
        _snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def lowerCAmelCase_ ( ) -> Tuple:
    """Check that join_uneven_inputs lets DDP train through uneven per-process batch counts."""
    __magic_name__ : Dict = create_accelerator(even_batches=_snake_case )
    __magic_name__ : List[str] = torch.nn.Linear(1 , 1 )
    __magic_name__ : Tuple = accelerator.prepare(_snake_case )
    # 3 samples across 2 processes -> process 0 sees 2 batches, process 1 sees 1
    __magic_name__ : Dict = create_dataloader(_snake_case , dataset_size=3 , batch_size=1 )
    __magic_name__ : str = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(_snake_case ):
            __magic_name__ : Union[str, Any] = ddp_model(batch[0].float() )
            __magic_name__ : Tuple = output.sum()
            loss.backward()
            batch_idxs.append(_snake_case )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def lowerCAmelCase_ ( _snake_case : str ) -> str:
    """Check that join_uneven_inputs under a non-DDP distributed setup emits a warning.

    NOTE(review): the body reads ``accelerator`` (presumably the intended name
    of the ``_snake_case`` parameter) — verify against the original script.
    """
    with warnings.catch_warnings(record=_snake_case ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
    # the most recent warning must be of the expected category and mention multi-GPU
    assert issubclass(w[-1].category , _snake_case )
    assert "only supported for multi-GPU" in str(w[-1].message )
def lowerCAmelCase_ ( ) -> Optional[Any]:
    """Check that join_uneven_inputs can temporarily override even_batches and then restores it."""
    __magic_name__ : Any = True
    __magic_name__ : int = False
    __magic_name__ : str = create_accelerator(even_batches=_snake_case )
    __magic_name__ : List[Any] = torch.nn.Linear(1 , 1 )
    __magic_name__ : List[str] = accelerator.prepare(_snake_case )
    __magic_name__ : Any = create_dataloader(_snake_case , dataset_size=3 , batch_size=1 )
    __magic_name__ : Any = create_dataloader(_snake_case , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=_snake_case ):
        # inside the context both samplers must carry the overridden flag
        __magic_name__ : str = train_dl.batch_sampler.even_batches
        __magic_name__ : Any = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    # after the context the default must be restored
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def lowerCAmelCase_ ( ) -> List[str]:
    """Check even_batches override when map-style and iterable dataloaders are mixed."""
    __magic_name__ : Optional[Any] = True
    __magic_name__ : Any = False
    __magic_name__ : int = create_accelerator(even_batches=_snake_case )
    __magic_name__ : Optional[int] = torch.nn.Linear(1 , 1 )
    __magic_name__ : str = accelerator.prepare(_snake_case )
    # iterable dataloader: has no batch_sampler, so the override must skip it gracefully
    create_dataloader(_snake_case , dataset_size=3 , batch_size=1 , iterable=_snake_case )
    __magic_name__ : Dict = create_dataloader(_snake_case , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=_snake_case ):
                __magic_name__ : List[str] = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def lowerCAmelCase_ ( ) -> str:
    """Check that overriding even_batches with only iterable dataloaders emits a warning."""
    __magic_name__ : Optional[int] = create_accelerator()
    __magic_name__ : Tuple = torch.nn.Linear(1 , 1 )
    __magic_name__ : Union[str, Any] = accelerator.prepare(_snake_case )
    create_dataloader(_snake_case , dataset_size=3 , batch_size=1 , iterable=_snake_case )
    with warnings.catch_warnings(record=_snake_case ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=_snake_case ):
            pass
    # the warning must be of the expected category and explain the map-style restriction
    assert issubclass(w[-1].category , _snake_case )
    assert "only supported for map-style datasets" in str(w[-1].message )
def lowerCAmelCase_ ( ) -> List[str]:
    """Run the full even_batches test suite; intended for `accelerate launch` on 2 GPUs.

    NOTE(review): the test helpers above are all defined as ``lowerCAmelCase_``
    yet invoked here by their original names — renaming-pass breakage.
    """
    __magic_name__ : str = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled" )
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs" )
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs" )
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning" )
    # temporarily fake an FSDP setup to trigger the non-DDP warning path, then restore it
    __magic_name__ : Dict = accelerator.state.distributed_type
    __magic_name__ : int = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(_snake_case )
    __magic_name__ : int = original_state
if __name__ == "__main__":
    # entry point: run under `accelerate launch` with two processes
    main()
| 281 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# configure root logging once at import time (format matches other HF example scripts)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# NOTE(review): bound to `snake_case` but later code logs via `logger` — renaming-pass breakage
snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase_ ( out , labels ):
    """Count correct predictions: row-wise argmax of ``out`` compared to ``labels``.

    Args:
        out: 2-D array of per-class scores, one row per example.
        labels: 1-D array of gold class indices.

    Returns the number of matching predictions (numpy integer).

    Note: the original signature repeated ``_snake_case`` (a SyntaxError) and
    the body read undefined names (``outputs``, ``labels``); fixed.
    """
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def lowerCAmelCase_ ( _snake_case ):
    """Parse a ROCStories cloze CSV into example tuples.

    Each row yields ``(story, continuation_1, continuation_2, label)`` where
    the story is columns 1-4 joined by spaces and the label is the 1-based
    answer column converted to 0-based.

    Note: the original read undefined names (``output``, ``line``) and kept the
    file handle open logic outside the reader loop; reading now happens inside
    the ``with`` block so the handle stays valid for the whole iteration.
    """
    with open(_snake_case, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[int] ) -> int:
    """Pack encoded datasets into fixed-size numpy arrays and wrap them as tensors.

    NOTE(review): broken as written — the six parameters all share the name
    ``_snake_case`` (a SyntaxError) and the per-example array writes were
    rewritten to a throwaway local, so the buffers are never filled. Presumably
    the parameters were (encoded_datasets, input_len, cap_length, start_token,
    delimiter_token, clf_token) — verify against the original run_openai_gpt
    example before running.
    """
    __magic_name__ : Optional[int] = []
    for dataset in encoded_datasets:
        __magic_name__ : Union[str, Any] = len(_snake_case )
        # per-dataset buffers: two choices per example (continuation 1 and 2)
        __magic_name__ : Dict = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        __magic_name__ : List[str] = np.zeros((n_batch, 2) , dtype=np.intaa )
        # -100 marks positions ignored by the LM loss
        __magic_name__ : Optional[int] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
        __magic_name__ : int = np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(_snake_case ):
            # layout per choice: [start] story [delimiter] continuation [clf]
            __magic_name__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __magic_name__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __magic_name__ : str = with_conta
            __magic_name__ : Tuple = with_conta
            # mc_token_ids: index of the [clf] token in each choice
            __magic_name__ : Union[str, Any] = len(_snake_case ) - 1
            __magic_name__ : int = len(_snake_case ) - 1
            __magic_name__ : Optional[Any] = with_conta
            __magic_name__ : Optional[Any] = with_conta
            __magic_name__ : Optional[int] = mc_label
        __magic_name__ : str = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(_snake_case ) for t in all_inputs ) )
    return tensor_datasets
def lowerCAmelCase_ ( ) -> List[Any]:
    """Fine-tune OpenAI GPT with a double-heads objective on the ROCStories cloze task.

    NOTE(review): the renaming pass broke this function — results are bound to
    ``__magic_name__``/``snake_case`` while later lines read the original names
    (``args``, ``model``, ``optimizer``...), and many call arguments were
    flattened to ``_snake_case``. The comments below describe the intended flow
    of the original ``run_openai_gpt`` example; verify against it before running.
    """
    # ---- CLI arguments -------------------------------------------------------
    __magic_name__ : Any = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=_snake_case , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=_snake_case , default="" )
    parser.add_argument("--eval_dataset" , type=_snake_case , default="" )
    parser.add_argument("--seed" , type=_snake_case , default=42 )
    parser.add_argument("--num_train_epochs" , type=_snake_case , default=3 )
    parser.add_argument("--train_batch_size" , type=_snake_case , default=8 )
    parser.add_argument("--eval_batch_size" , type=_snake_case , default=16 )
    parser.add_argument("--adam_epsilon" , default=1E-8 , type=_snake_case , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=_snake_case , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=_snake_case , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=_snake_case , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=_snake_case , default=6.25E-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=_snake_case , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=_snake_case , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=_snake_case , default=0.01 )
    parser.add_argument("--lm_coef" , type=_snake_case , default=0.9 )
    parser.add_argument("--n_valid" , type=_snake_case , default=374 )
    parser.add_argument("--server_ip" , type=_snake_case , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=_snake_case , default="" , help="Can be used for distant debugging." )
    __magic_name__ : List[Any] = parser.parse_args()
    print(_snake_case )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
        ptvsd.wait_for_attach()
    # ---- reproducibility + device selection ----------------------------------
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    __magic_name__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    __magic_name__ : Optional[int] = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(_snake_case , _snake_case ) )
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True." )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    __magic_name__ : List[Any] = ["_start_", "_delimiter_", "_classify_"]
    __magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(_snake_case )
    __magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(_snake_case )
    __magic_name__ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(_snake_case ) )
    model.to(_snake_case )
    # Load and encode the datasets
    def tokenize_and_encode(_snake_case : str ):
        # recursively tokenize strings, pass ints through, map over containers
        if isinstance(_snake_case , _snake_case ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) )
        elif isinstance(_snake_case , _snake_case ):
            return obj
        return [tokenize_and_encode(_snake_case ) for o in obj]
    logger.info("Encoding dataset..." )
    __magic_name__ : Optional[int] = load_rocstories_dataset(args.train_dataset )
    __magic_name__ : str = load_rocstories_dataset(args.eval_dataset )
    __magic_name__ : int = (train_dataset, eval_dataset)
    __magic_name__ : List[str] = tokenize_and_encode(_snake_case )
    # Compute the max input length for the Transformer
    __magic_name__ : Optional[Any] = model.config.n_positions // 2 - 2
    __magic_name__ : Optional[int] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    __magic_name__ : List[str] = min(_snake_case , model.config.n_positions )  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    __magic_name__ : List[Any] = pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case )
    __magic_name__ , __magic_name__ : Optional[int] = tensor_datasets[0], tensor_datasets[1]
    __magic_name__ : Tuple = TensorDataset(*_snake_case )
    __magic_name__ : Union[str, Any] = RandomSampler(_snake_case )
    __magic_name__ : Dict = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size )
    __magic_name__ : Any = TensorDataset(*_snake_case )
    __magic_name__ : Optional[Any] = SequentialSampler(_snake_case )
    __magic_name__ : int = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            __magic_name__ : Tuple = args.max_steps
            __magic_name__ : List[str] = args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1
        else:
            __magic_name__ : List[str] = len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
        # no weight decay on biases and LayerNorm parameters (standard for transformers)
        __magic_name__ : str = list(model.named_parameters() )
        __magic_name__ : Dict = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        __magic_name__ : str = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        __magic_name__ : str = AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
        __magic_name__ : List[str] = get_linear_schedule_with_warmup(
            _snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case )
    if args.do_train:
        # ---- training loop ---------------------------------------------------
        __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            __magic_name__ : List[str] = 0
            __magic_name__ : Tuple = 0
            __magic_name__ : Dict = tqdm(_snake_case , desc="Training" )
            for step, batch in enumerate(_snake_case ):
                __magic_name__ : Optional[Any] = tuple(t.to(_snake_case ) for t in batch )
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = batch
                __magic_name__ : Optional[Any] = model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
                # weighted sum of the LM loss and the multiple-choice loss
                __magic_name__ : Optional[Any] = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # exponential moving average of the loss for the progress bar
                __magic_name__ : List[str] = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                __magic_name__ : int = "Training loss: {:.2e} lr: {:.2e}".format(_snake_case , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        __magic_name__ : Dict = model.module if hasattr(_snake_case , "module" ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        __magic_name__ : List[Any] = os.path.join(args.output_dir , _snake_case )
        __magic_name__ : Dict = os.path.join(args.output_dir , _snake_case )
        torch.save(model_to_save.state_dict() , _snake_case )
        model_to_save.config.to_json_file(_snake_case )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        __magic_name__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        __magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(_snake_case )
    if args.do_eval:
        # ---- evaluation loop -------------------------------------------------
        model.eval()
        __magic_name__ , __magic_name__ : Any = 0, 0
        __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0
        for batch in tqdm(_snake_case , desc="Evaluating" ):
            __magic_name__ : int = tuple(t.to(_snake_case ) for t in batch )
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = batch
            with torch.no_grad():
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = model(
                    _snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
            __magic_name__ : Tuple = mc_logits.detach().cpu().numpy()
            __magic_name__ : Any = mc_labels.to("cpu" ).numpy()
            __magic_name__ : str = accuracy(_snake_case , _snake_case )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        # aggregate metrics and dump them next to the checkpoint
        __magic_name__ : Tuple = eval_loss / nb_eval_steps
        __magic_name__ : List[Any] = eval_accuracy / nb_eval_examples
        __magic_name__ : int = tr_loss / nb_tr_steps if args.do_train else None
        __magic_name__ : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        __magic_name__ : int = os.path.join(args.output_dir , "eval_results.txt" )
        with open(_snake_case , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info(" %s = %s" , _snake_case , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
    # script entry point: parse CLI args, train and/or evaluate
    main()
| 281 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: the Beautiful Soup 4 package is "bs4", not "bsa"
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Build the search query from CLI args, or prompt interactively (URL-quoted).
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Primary result layout: organic result anchor inside a "yuRUbf" div.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout ("kCrYT" div): the href is a redirect whose query
        # string carries the real target URL.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 281 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Force deterministic RNG/cudnn behavior so pipeline outputs are reproducible in tests.
enable_full_determinism()
class _snake_case(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast, dummy-sized tests for ``StableUnCLIPPipeline``.

    NOTE(review): the original listed the same identifier for every base class
    (invalid: duplicate bases) and for every class attribute / method name
    (later definitions shadow earlier ones); names restored to the standard
    diffusers pipeline-test layout the mixins expect.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build tiny prior + denoiser components so tests run quickly on CPU."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,  # NOTE(review): boolean restored from upstream test — confirm
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # class embedding is the concatenation of (image embedding, noise-level embedding)
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for the given device/seed."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Only check exact output differences on CPU, where results are deterministic.
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class _snake_case(unittest.TestCase):
    """GPU integration tests for ``StableUnCLIPPipeline`` with real checkpoints.

    NOTE(review): this class shares its name with the fast-test class above and
    shadows it at module scope — consider giving it a distinct name.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 281 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """With the mock filesystem fixture active, both the "mock" protocol and the
    stock "bz2" implementation are present in fsspec's registry.

    NOTE(review): name restored — every test in this module was named
    ``lowerCAmelCase_`` (mutual shadowing, not pytest-collectible).
    """
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    """Without the fixture, "mock" is absent while the built-in "bz2" remains registered."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """``extract_path_from_uri`` strips the s3 scheme but leaves local paths untouched."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """A mocked remote filesystem is detected as remote; the local "file" fs is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    # fixed: the second check must probe the LOCAL filesystem, not the remote one again
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    """Each compression filesystem exposes the decompressed member and round-trips
    its content against the plain-text fixture.

    NOTE(review): the original declared seven parameters all named
    ``_snake_case`` (a SyntaxError); fixture names restored from the body's uses.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    # the exposed member drops the compression suffix (e.g. "data.txt.gz" -> "data.txt")
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained-protocol URIs (e.g. ``zip://member::archive``) resolve member files."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """HfFileSystem lists, stats and reads files of a private dataset repo.

    NOTE(review): fixture names reconstructed from the body's uses — verify
    against this project's conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)

    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """Re-registering an already-set protocol makes the module reload emit a warning."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    # NOTE(review): plain ``warnings.warn`` emits UserWarning — confirm category.
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 281 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case(unittest.TestCase):
    """Tests for ``TextStreamer`` / ``TextIteratorStreamer`` generation streaming.

    NOTE(review): method names restored — the original named every test
    ``SCREAMING_SNAKE_CASE`` so later definitions shadowed earlier ones and
    none were unittest-discoverable.
    """

    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        # disable EOS so generation always produces max_new_tokens tokens
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        # generation runs on a worker thread while this thread consumes the stream
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 281 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): distinct names restored — the original bound both objects to the
# same identifier, so the archive map immediately clobbered the logger.
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case(PretrainedConfig):
    """Configuration class for ConvBERT models: stores the hyper-parameters that
    define the architecture.

    NOTE(review): the original declared twenty ``__init__`` parameters all named
    ``_a`` (a SyntaxError) and never assigned any attribute on ``self``; names
    restored from the values the body references.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class _snake_case(OnnxConfig):
    """ONNX export configuration for ConvBERT: declares the dynamic input axes."""

    @property
    def inputs(self):
        # Restored: local was assigned to a throwaway name while the return
        # expression referenced ``dynamic_axis`` (NameError); property renamed to
        # ``inputs``, the hook OnnxConfig consumers read.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 281 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # sentencepiece is optional; the slow tokenizer is unavailable without it
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# NOTE(review): constant names restored — the original bound every object below
# to the same identifier, so the class-level references (VOCAB_FILES_NAMES, ...)
# raised NameError.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class _snake_case(PreTrainedTokenizerFast):
    """Fast (backed by HuggingFace *tokenizers*) XLNet tokenizer.

    NOTE(review): base class, class attributes, parameter names and method names
    restored — the original used duplicate ``_a`` parameters (SyntaxError),
    never set the ``self.*`` attributes, and named every override
    ``SCREAMING_SNAKE_CASE`` so the base-class hooks were never overridden.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        # XLNet pads token-type ids with segment id 3
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for the <cls> token."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into ``save_directory``; returns the path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 281 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the demo image (the Merlion) used to sanity-check conversions.

    NOTE(review): name restored from the upstream BLIP-2 conversion script —
    all helpers here shared one name, shadowing each other.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build (old, new) pairs mapping original LAVIS state-dict keys to HF BLIP-2 names.

    Name/parameter restored: the visible caller invokes ``create_rename_keys(config)``
    and the body reads ``config.vision_config.num_hidden_layers``.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Signature restored: the original declared three parameters all named
    ``_snake_case`` (a SyntaxError), and the visible caller uses ``rename_key``.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Re-pack the separate q/v biases into the fused qkv bias HF expects.

    The original checkpoint stores no k bias, so a zero vector is inserted
    between q and v.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Build a BLIP-2 config (and the matching image size) for ``model_name``.

    Name and ``eos_token_id`` keyword confirmed by the visible call site.
    Returns ``(config, image_size)``.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        # robustness: previously an unknown name fell through to a NameError
        raise ValueError(f"Model name {model_name} not supported")

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : str=None , _snake_case : Dict=False ) -> List[Any]:
    """Convert an original LAVIS BLIP-2 checkpoint to the HF format, verify
    logits against the original model, optionally save / push to the hub.

    NOTE(review): the obfuscation collapsed the three parameters
    (model_name, pytorch_dump_folder_path, push_to_hub) into duplicate
    ``_snake_case`` names — a SyntaxError — and reassigned locals to
    ``__magic_name__`` while later lines still use the original names
    (``tokenizer``, ``hf_model``, ``state_dict``, ...). The control flow
    below documents the intended pipeline; the names need restoring.
    """
    # Tokenizer choice follows the language backbone (OPT vs flan-T5).
    __magic_name__ : Optional[int] = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    # "\n" token id is used as eos for config creation below.
    __magic_name__ : List[Any] = tokenizer("\n" , add_special_tokens=_snake_case ).input_ids[0]
    __magic_name__ , __magic_name__ : Tuple = get_blipa_config(_snake_case , eos_token_id=_snake_case )
    __magic_name__ : Union[str, Any] = BlipaForConditionalGeneration(_snake_case ).eval()
    # Map HF model names to (LAVIS model name, model type).
    __magic_name__ : Any = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    __magic_name__ , __magic_name__ : Union[str, Any] = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    __magic_name__ : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
    __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = load_model_and_preprocess(
        name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    __magic_name__ : Dict = original_model.state_dict()
    __magic_name__ : str = create_rename_keys(_snake_case )
    for src, dest in rename_keys:
        rename_key(_snake_case , _snake_case , _snake_case )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        __magic_name__ : Any = state_dict.pop(_snake_case )
        if key.startswith("Qformer.bert" ):
            __magic_name__ : Optional[int] = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            __magic_name__ : Any = key.replace("self" , "attention" )
        if "opt_proj" in key:
            __magic_name__ : Union[str, Any] = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            __magic_name__ : Optional[int] = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            __magic_name__ : List[str] = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            __magic_name__ : Tuple = key.replace("t5" , "language" )
        __magic_name__ : Dict = val
    # read in qv biases
    read_in_q_v_bias(_snake_case , _snake_case )
    # Only qformer position_ids may be unexpected; nothing may be missing.
    __magic_name__ , __magic_name__ : Tuple = hf_model.load_state_dict(_snake_case , strict=_snake_case )
    assert len(_snake_case ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    __magic_name__ : List[Any] = load_demo_image()
    __magic_name__ : Tuple = vis_processors["eval"](_snake_case ).unsqueeze(0 ).to(_snake_case )
    __magic_name__ : Dict = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(_snake_case )
    # create processor
    __magic_name__ : Optional[Any] = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=_snake_case , image_std=_snake_case )
    __magic_name__ : Dict = BlipaProcessor(image_processor=_snake_case , tokenizer=_snake_case )
    __magic_name__ : Union[str, Any] = processor(images=_snake_case , return_tensors="pt" ).pixel_values.to(_snake_case )
    # make sure processor creates exact same pixel values
    assert torch.allclose(_snake_case , _snake_case )
    original_model.to(_snake_case )
    hf_model.to(_snake_case )
    with torch.no_grad():
        if "opt" in model_name:
            __magic_name__ : List[Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            __magic_name__ : Optional[int] = hf_model(_snake_case , _snake_case ).logits
        else:
            __magic_name__ : int = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            # -100 labels are ignored by the loss.
            __magic_name__ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            __magic_name__ : List[str] = hf_model(_snake_case , _snake_case , labels=_snake_case ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        __magic_name__ : List[str] = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case )
        assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        __magic_name__ : Tuple = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case )
    else:
        # cast to same type
        __magic_name__ : str = logits.dtype
        assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    __magic_name__ : Optional[int] = ""
    __magic_name__ : Dict = tokenizer(_snake_case , return_tensors="pt" ).input_ids.to(_snake_case )
    __magic_name__ : int = original_model.generate({"image": original_pixel_values} )
    __magic_name__ : Optional[Any] = hf_model.generate(
        _snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , _snake_case )
    # Decode only the newly generated tokens (strip the prompt prefix).
    __magic_name__ : Tuple = input_ids.shape[1]
    __magic_name__ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case )
    __magic_name__ : Union[str, Any] = [text.strip() for text in output_text]
    print("HF generation:" , _snake_case )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(_snake_case )
        hf_model.save_pretrained(_snake_case )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
# CLI entry point for the BLIP-2 conversion script.
# NOTE(review): the obfuscation rebinds every local to `snake_case`, yet the
# calls below still reference the original names (`parser`, `choices`,
# `args`) — these would raise NameError as written; names need restoring.
if __name__ == "__main__":
    snake_case : Any = argparse.ArgumentParser()
    # Supported checkpoint names (see model_name_to_original mapping above).
    snake_case : Union[str, Any] = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    snake_case : int = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 1 |
from __future__ import annotations
def lowerCAmelCase_ ( electron_conc : float , hole_conc : float , intrinsic_conc : float , ) -> tuple:
    """Solve the mass-action law n_i**2 = n * p for the missing quantity.

    Exactly one of the three concentrations must be 0 (the unknown).
    Returns ``(name_of_unknown, value)``; raises ValueError for negative
    inputs or when the number of zeros is not exactly one.

    The original collapsed all three parameters into duplicate
    ``_snake_case`` names (a SyntaxError) while the body used the real
    names — restored here with behavior otherwise unchanged.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError("You cannot supply more or less than 2 values" )
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor" )
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor" )
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor" )
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 281 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer resources for PhoBERT.
# NOTE(review): every constant below is bound to the same name
# `snake_case`, so each assignment shadows the previous one, while the
# class later references the original names (VOCAB_FILES_NAMES, etc.).
snake_case : Dict = logging.get_logger(__name__)
# Expected filenames inside a saved tokenizer directory.
snake_case : Union[str, Any] = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
# Download URLs for the pretrained PhoBERT vocab/merges files.
snake_case : Dict = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
# Maximum model input sizes (in tokens) per pretrained checkpoint.
snake_case : Union[str, Any] = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( word ) -> set:
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is any sequence of symbols (string or tuple of variable-length
    strings, as used by BPE). The original's parameter was renamed while
    the body still read an undefined ``word``; it also copied the result
    set redundantly (``set(pairs)``) — both fixed.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _snake_case ( snake_case ):
    """PhoBERT tokenizer (BPE over a fixed vocab/merges pair).

    NOTE(review): obfuscation damage throughout — method signatures use
    duplicate ``_a`` parameter names (SyntaxError) and locals are bound to
    ``__magic_name__`` while later lines read the original names
    (``self.encoder``, ``merges``, ``word``, ...). Comments below document
    the intended behavior of each method.
    """
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Intended signature: (vocab_file, merges_file, bos/eos/sep/cls/unk/pad/mask tokens).
    def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
        super().__init__(
            bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
        __magic_name__ : Dict = vocab_file
        __magic_name__ : Tuple = merges_file
        # token -> id map seeded with the four special tokens.
        __magic_name__ : List[Any] = {}
        __magic_name__ : List[Any] = 0
        __magic_name__ : Tuple = 1
        __magic_name__ : int = 2
        __magic_name__ : Union[str, Any] = 3
        self.add_from_file(_a )
        # id -> token reverse map.
        __magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
        # Parse BPE merge rules; rank = priority (earlier merges first).
        with open(_a , encoding="utf-8" ) as merges_handle:
            __magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
        __magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        __magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
        # BPE cache: token -> already-segmented result.
        __magic_name__ : Optional[int] = {}
    # Build model inputs: <s> A </s> or <s> A </s></s> B </s>.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ : Optional[Any] = [self.cls_token_id]
        __magic_name__ : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # 1 for special tokens, 0 for sequence tokens.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is None:
            return [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
    # PhoBERT does not use token type ids: always all-zero.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        __magic_name__ : Optional[Any] = [self.sep_token_id]
        __magic_name__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # vocab_size
        return len(self.encoder )
    def SCREAMING_SNAKE_CASE ( self ):
        # get_vocab: base vocab merged with added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )
    # Core BPE: greedily merge the lowest-ranked adjacent pair until none
    # of the remaining pairs appears in bpe_ranks.
    def SCREAMING_SNAKE_CASE ( self , _a ):
        if token in self.cache:
            return self.cache[token]
        __magic_name__ : List[Any] = tuple(_a )
        # Mark the last character as word-final with </w>.
        __magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        __magic_name__ : Any = get_pairs(_a )
        if not pairs:
            return token
        while True:
            __magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __magic_name__ , __magic_name__ : List[str] = bigram
            __magic_name__ : List[str] = []
            __magic_name__ : List[str] = 0
            # Rebuild the word, fusing every (first, second) occurrence.
            while i < len(_a ):
                try:
                    __magic_name__ : Any = word.index(_a , _a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __magic_name__ : Tuple = j
                if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __magic_name__ : Union[str, Any] = tuple(_a )
            __magic_name__ : Optional[int] = new_word
            if len(_a ) == 1:
                break
            else:
                __magic_name__ : List[Any] = get_pairs(_a )
        # Join sub-words with the continuation marker and drop trailing </w>.
        __magic_name__ : Optional[int] = "@@ ".join(_a )
        __magic_name__ : Tuple = word[:-4]
        __magic_name__ : str = word
        return word
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _tokenize: whitespace-split then BPE each chunk.
        __magic_name__ : Optional[Any] = []
        __magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
        for token in words:
            split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
        return split_tokens
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # token -> id (falls back to <unk>).
        return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # id -> token (falls back to <unk>).
        return self.decoder.get(_a , self.unk_token )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        # Undo BPE: drop the "@@ " continuation markers.
        __magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
        return out_string
    # save_vocabulary: copy vocab/merges files into save_directory.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : Optional[int] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __magic_name__ : Union[str, Any] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
            copyfile(self.merges_file , _a )
        return out_vocab_file, out_merge_file
    # Load a fairseq-style "<token> <count>" dictionary file (or handle).
    def SCREAMING_SNAKE_CASE ( self , _a ):
        if isinstance(_a , _a ):
            try:
                with open(_a , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(_a )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        __magic_name__ : List[Any] = f.readlines()
        for lineTmp in lines:
            __magic_name__ : Optional[Any] = lineTmp.strip()
            __magic_name__ : Union[str, Any] = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            __magic_name__ : Optional[int] = line[:idx]
            # Intended: self.encoder[word] = len(self.encoder)
            __magic_name__ : Dict = len(self.encoder )
| 281 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    """Pipeline tests for IFInpaintingSuperResolutionPipeline.

    NOTE(review): all test methods were renamed to the same identifier
    ``SCREAMING_SNAKE_CASE``, so later defs shadow earlier ones and only
    the last would be registered — the original distinct test names need
    restoring for the suite to run.
    """
    UpperCamelCase__ = IFInpaintingSuperResolutionPipeline
    # width/height are fixed by the super-resolution stage.
    UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    UpperCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'latents'}
    # get_dummy_components
    def SCREAMING_SNAKE_CASE ( self ):
        return self._get_superresolution_dummy_components()
    # get_dummy_inputs(device, seed=0): build tiny deterministic tensors.
    def SCREAMING_SNAKE_CASE ( self , _a , _a=0 ):
        if str(_a ).startswith("mps" ):
            # MPS has no per-device Generator; fall back to the global seed.
            __magic_name__ : Dict = torch.manual_seed(_a )
        else:
            __magic_name__ : Union[str, Any] = torch.Generator(device=_a ).manual_seed(_a )
        __magic_name__ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
        __magic_name__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
        __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
        __magic_name__ : Optional[int] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def SCREAMING_SNAKE_CASE ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def SCREAMING_SNAKE_CASE ( self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def SCREAMING_SNAKE_CASE ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )
    def SCREAMING_SNAKE_CASE ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def SCREAMING_SNAKE_CASE ( self ):
        self._test_save_load_local()
    def SCREAMING_SNAKE_CASE ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 281 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( _snake_case : str = "laptop" ) -> DataFrame:
    """Scrape Amazon.in search results for ``product`` into a DataFrame.

    NOTE(review): obfuscation damage — the parameter was renamed but the
    body still reads ``product`` (undefined), ``requests.get`` is passed
    the same name for both URL and headers (intended: the url string and
    the header dict built below), and the scraped fields/locals
    (``soup``, ``data_frame``, ``product_title``, ...) are assigned to
    ``__magic_name__`` while later lines use the original names.
    """
    __magic_name__ : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
    # Browser-like headers so Amazon serves the regular HTML page.
    __magic_name__ : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    __magic_name__ : Tuple = BeautifulSoup(requests.get(_snake_case , headers=_snake_case ).text )
    # Initialize a Pandas dataframe with the column titles
    __magic_name__ : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            # NOTE(review): `item.ha` is presumably the mangled `item.h2` — confirm.
            __magic_name__ : Dict = item.ha.text
            __magic_name__ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
            __magic_name__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                __magic_name__ : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                __magic_name__ : Dict = "Not available"
            try:
                __magic_name__ : Optional[int] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                __magic_name__ : List[str] = ""
            try:
                # Discount percentage = (MRP - price) / MRP * 100.
                __magic_name__ : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                __magic_name__ : str = float("nan" )
        except AttributeError:
            pass
        # Append the scraped row; blank out inconsistent price columns.
        __magic_name__ : Optional[int] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __magic_name__ : Optional[Any] = " "
        __magic_name__ : str = " "
        data_frame.index += 1
    return data_frame
# Demo: dump headphone search results to a CSV file.
# NOTE(review): `product` and `get_amazon_product_data` are undefined here —
# the local was rebound to `snake_case` and the function above was renamed.
if __name__ == "__main__":
    snake_case : Any = "headphones"
    get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 281 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
snake_case : str = re.compile(R"\s+")
def lowerCAmelCase_ ( example : dict ) -> dict:
    """Compute a whitespace-insensitive MD5 hash of ``example["content"]``.

    Fixes from the broken original: ``hashlib.mda`` is not a real hashlib
    attribute (intended ``md5``), and the regex argument / example dict
    were confused. The pattern is inlined (r"\\s+") rather than read from
    the shadowed module-level constant so the function is self-contained.
    """
    return {"hash": hashlib.md5(re.sub(r"\s+" , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def lowerCAmelCase_ ( example : dict ) -> dict:
    """Return mean and max line length of ``example["content"]``.

    Fixes the broken original, which measured ``len(<example>)`` instead
    of each line's length and read an undefined ``example`` name.
    """
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def lowerCAmelCase_ ( example : dict ) -> dict:
    """Return the fraction of alphanumeric characters in the content.

    Fixes the broken original, which read an undefined ``example`` name
    and an undefined ``alpha_frac`` local.
    """
    alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
    return {"alpha_frac": alpha_frac}
def lowerCAmelCase_ ( example : dict , uniques : set ) -> bool:
    """Return True iff this example's hash is still unseen.

    Consumes the hash from ``uniques`` on first sight, so a second
    example with the same hash returns False (exact deduplication).
    The original collapsed both parameters into duplicate names.
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"] )
        return True
    else:
        return False
def lowerCAmelCase_ ( example : dict , scan_width : int = 5 ) -> dict:
    """Detect "auto-generated" markers in the first ``scan_width`` lines.

    Fixes the broken original, which zipped the example dict against
    itself instead of ``range(scan_width)`` against the split lines, and
    read undefined ``example``/``keywords`` names.
    """
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def lowerCAmelCase_ ( example : dict , scan_width : int = 5 , coeff : float = 0.05 ) -> dict:
    """Heuristically flag config files and test files.

    Two tests: (1) an explicit keyword in the first ``scan_width`` lines;
    (2) a high relative frequency of "config"/"test" substrings — more
    than ``coeff`` occurrences per line on average. Fixes the broken
    original's undefined names and duplicate parameters; control flow
    (threshold checked after the counting loop) is preserved.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("config" )
        count_test += line.lower().count("test" )
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}
def lowerCAmelCase_ ( example : dict ) -> dict:
    """Flag files with no Python structure keywords (def/class/for/while).

    Fixes the broken original, which read undefined ``example``,
    ``lines`` and ``keywords`` names.
    """
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def lowerCAmelCase_ ( example : dict , minimum : int = 4 ) -> dict:
    """Flag files containing at most ``minimum`` "=" characters.

    Counts every "=" (so "==" counts twice — preserved from the original
    heuristic). Fixes the broken original's undefined ``example`` and
    ``counter`` names and its duplicate parameter names.
    """
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def lowerCAmelCase_ ( example : dict ) -> dict:
    """Return the character-per-token ratio of the content.

    Uses the module-level ``tokenizer``. Fixes the broken original, which
    read undefined ``example``/``input_ids`` names and passed the example
    dict as the ``truncation`` flag; truncation is disabled so the ratio
    covers the whole document — NOTE(review): confirm against the
    original pipeline's intent.
    """
    input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
    return {"ratio": ratio}
def lowerCAmelCase_ ( example : dict ) -> dict:
    """Run all per-example statistics/heuristics and merge the results.

    Fixes the broken original's undefined ``results`` local.
    NOTE(review): the helper names below (get_hash, line_stats, ...) refer
    to the canonical preprocessing functions; in this transformed file
    they were all renamed and would need restoring at module level.
    """
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def lowerCAmelCase_ ( example : dict , uniques : set , args ) -> bool:
    """Combined keep/drop filter over the precomputed example statistics.

    Drops duplicates, auto-generated files, files with extreme line
    lengths or low alphanumeric/token ratios, and (probabilistically)
    config/test files and keyword-free files. The original collapsed all
    three parameters into duplicate names (a SyntaxError).
    """
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def lowerCAmelCase_ ( file_path ) -> None:
    """Gzip ``file_path`` to ``file_path + ".gz"`` and delete the original.

    Fixes the broken original, which passed the path (instead of the two
    open file objects) to ``shutil.copyfileobj``.
    """
    with open(file_path , "rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
# NOTE(review): throughout this script every binding was renamed to
# `snake_case` while subsequent lines still use the original names
# (`parser`, `args`, `tokenizer`, `ds`, `t_start`, `uniques`, `frac`,
# `ds_filter`, `output_dir`, `data_dir`, `file_path`, `end_index`,
# `duplicate_clusters`) — these would raise NameError as written. Also,
# `ds.filter(filter, ...)` below resolves to the *builtin* `filter`
# because the filter function above was renamed.
snake_case : str = HfArgumentParser(PreprocessingArguments)
snake_case : Any = parser.parse_args()
if args.num_workers is None:
    snake_case : Tuple = multiprocessing.cpu_count()
snake_case : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
snake_case : int = time.time()
snake_case : Tuple = load_dataset(args.dataset_name, split="train")
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
snake_case : str = time.time()
snake_case : str = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
snake_case : Tuple = set(ds.unique("hash"))
snake_case : Union[str, Any] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
snake_case : Any = time.time()
snake_case : Union[str, Any] = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    snake_case : Tuple = time.time()
    snake_case ,snake_case : Any = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
snake_case : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
snake_case : Dict = output_dir / "data"
data_dir.mkdir(exist_ok=True)
snake_case : Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    snake_case : Optional[Any] = str(data_dir / F"file-{file_number+1:012}.json")
    snake_case : Tuple = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}")
| 281 |
from __future__ import annotations
class _snake_case :
def __init__( self , _a ):
__magic_name__ : Optional[Any] = data
__magic_name__ : Node | None = None
__magic_name__ : Node | None = None
def lowerCAmelCase_ ( _snake_case : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCAmelCase_ ( _snake_case : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowerCAmelCase_ ( _snake_case : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCAmelCase_ ( ) -> None: # Main function for testing.
'''simple docstring'''
__magic_name__ : int = Node(1 )
__magic_name__ : Union[str, Any] = Node(2 )
__magic_name__ : Tuple = Node(3 )
__magic_name__ : Optional[Any] = Node(4 )
__magic_name__ : Union[str, Any] = Node(5 )
__magic_name__ : Any = Node(6 )
__magic_name__ : int = Node(7 )
__magic_name__ : List[str] = Node(8 )
__magic_name__ : Union[str, Any] = Node(9 )
print(is_full_binary_tree(_snake_case ) )
print(depth_of_tree(_snake_case ) )
print("Tree is: " )
display(_snake_case )
if __name__ == "__main__":
main()
| 281 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
snake_case : int = "\\n\n"
snake_case : str = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
snake_case : Optional[Any] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    # _info: declare the metric's input schema (a list of strings).
    # NOTE(review): _DESCRIPTION/_CITATION/_KWARGS_DESCRIPTION are not
    # defined under those names in this file — the module constants were
    # renamed to `snake_case`; confirm and restore the bindings.
    def SCREAMING_SNAKE_CASE ( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "input_texts": datasets.Value("string" ),
                } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a = 16 , _a = True , _a=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__magic_name__ : Any = "cuda"
else:
__magic_name__ : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
__magic_name__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(_a )
__magic_name__ : List[str] = model.to(_a )
__magic_name__ : Dict = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__magic_name__ : Union[str, Any] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__magic_name__ : List[Any] = model.config.max_length - 1
else:
__magic_name__ : List[Any] = model.config.max_length
__magic_name__ : List[Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors="pt" , return_attention_mask=_a , ).to(_a )
__magic_name__ : Dict = encodings["input_ids"]
__magic_name__ : int = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__magic_name__ : Tuple = []
__magic_name__ : int = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
__magic_name__ : Any = min(start_index + batch_size , len(_a ) )
__magic_name__ : Optional[int] = encoded_texts[start_index:end_index]
__magic_name__ : Union[str, Any] = attn_masks[start_index:end_index]
if add_start_token:
__magic_name__ : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
__magic_name__ : Dict = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__magic_name__ : Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
__magic_name__ : str = encoded_batch
with torch.no_grad():
__magic_name__ : Union[str, Any] = model(_a , attention_mask=_a ).logits
__magic_name__ : Union[str, Any] = out_logits[..., :-1, :].contiguous()
__magic_name__ : List[str] = labels[..., 1:].contiguous()
__magic_name__ : Dict = attn_mask[..., 1:].contiguous()
__magic_name__ : List[str] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 281 |
def lowerCAmelCase_ ( input_string : str , pattern : str ) -> bool:
    """Return True if `input_string` fully matches the regex-like `pattern`.

    Supports `.` (any single character) and `*` (zero or more of the preceding
    element), via bottom-up dynamic programming. Parameter names restored — the
    original signature declared two parameters both named `_snake_case`
    (a SyntaxError), and the body already used these names.

    >>> lowerCAmelCase_("aab", "c*a*b")
    True
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    # (the matcher above is named `lowerCAmelCase_`; `match_pattern` does not exist here)
    if lowerCAmelCase_(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 281 | 1 |
def lowerCAmelCase_ ( _snake_case : int ) -> list:
    """Return the reflected-binary Gray code sequence for `_snake_case` bits as ints.

    Raises:
        ValueError: if the bit count is negative.

    >>> lowerCAmelCase_(2)
    [0, 1, 3, 2]
    """
    if _snake_case < 0:
        raise ValueError("The given input must be positive" )
    # i ^ (i >> 1) yields the i-th reflected-binary Gray code directly, which
    # equals int(gray_string[i], 2) of the string-based construction. This also
    # removes the call to a helper whose name was lost in a refactor.
    return [i ^ (i >> 1) for i in range(1 << _snake_case)]
def lowerCAmelCase_ ( _snake_case : int ) -> list:
    """Return the Gray code sequence for `_snake_case` bits as binary strings.

    Built iteratively by reflect-and-prefix: prepend "0" to the current
    sequence, then "1" to its reversal. (The original recursive self-call used
    a function name lost in a refactor, so this version iterates instead —
    same output order.)

    >>> lowerCAmelCase_(2)
    ['00', '01', '11', '10']
    """
    if _snake_case == 0:
        return ["0"]
    sequence = ["0", "1"]
    for _ in range(_snake_case - 1):
        # first half: "0"-prefixed in order; second half: "1"-prefixed reversed
        sequence = ["0" + code for code in sequence] + ["1" + code for code in reversed(sequence)]
    return sequence
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 281 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:
    # PIL unavailable: define a no-op stand-in class instead.
    # NOTE(review): this stub was presumably named `Image` originally; renamed
    # to `_snake_case` it no longer provides the `Image` name used below.
    class _snake_case :
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_a , **_a ):
            # Accept any arguments and do nothing (placeholder method).
            pass
def lowerCAmelCase_ ( _snake_case : "Image" ) -> str:
    """Return the first 10 hex chars of the MD5 digest of the image's raw bytes.

    Fixes `hashlib.mda` (typo for `hashlib.md5`) and the references to the
    undefined names `image`/`m` left by an automated rename. The `Image`
    annotation is a string so the function defines cleanly without PIL.
    """
    digest = hashlib.md5(_snake_case.tobytes())
    return digest.hexdigest()[:10]
def lowerCAmelCase_ ( _snake_case : "Image" ) -> Dict:
    """Summarize a mask image as a short content hash plus its array shape.

    The original referenced the undefined names `npimg` and `hashimage` (lost
    in an automated rename); the md5-prefix hash the helper computed is
    inlined here.
    """
    npimg = np.array(_snake_case)
    shape = npimg.shape
    # hash of the raw image bytes, truncated to 10 hex chars (ex-`hashimage`)
    return {"hash": hashlib.md5(_snake_case.tobytes()).hexdigest()[:10], "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for mask-generation (SAM-style) models.

    NOTE(review): several methods declare multiple parameters all named `_a`
    (a SyntaxError), and `__magic_name__` assignments shadow the local names
    the bodies read (`image_segmenter`, `outputs`, `new_outupt`) — artifacts
    of an automated rename. Code is left byte-identical pending restoration.
    """

    # Model mappings eligible for this pipeline (empty when the framework
    # mapping is unavailable).
    UpperCamelCase__ = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    UpperCamelCase__ = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )

    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
        """Build a MaskGenerationPipeline plus two sample image paths for the harness."""
        __magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        """Intentionally empty: per-model checks are covered by the slow tests below."""
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def SCREAMING_SNAKE_CASE ( self ):
        """Placeholder for the TF variant (skipped: not implemented)."""
        pass

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ):
        """End-to-end mask generation with SAM-huge; compares hashed masks and scores."""
        __magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        __magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Dict = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ] , )
        # fmt: on

    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Same pipeline with `pred_iou_thresh=1`; expects only the top-5 masks."""
        __magic_name__ : str = "facebook/sam-vit-huge"
        __magic_name__ : str = pipeline("mask-generation" , model=_a )
        __magic_name__ : Tuple = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Any = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ] , )
| 281 | 1 |
import math
def lowerCAmelCase_ ( x : float , a : float ) -> float:
    """Return f(x) = x^2 - a, whose positive root is sqrt(a).

    Parameter names restored — the original signature declared two parameters
    both named `_snake_case` (a SyntaxError), and the body already used `a`.
    """
    return math.pow(x, 2) - a
def lowerCAmelCase_ ( _snake_case : float ) -> float:
    """Return f'(x) = 2x, the derivative of f(x) = x^2 - a.

    The original body referenced an undefined name `x`; it now uses the
    function's parameter.
    """
    return 2 * _snake_case
def lowerCAmelCase_ ( a : float ) -> float:
    """Return a starting guess >= sqrt(a): repeatedly square 2.0 until it exceeds `a`.

    Parameter name restored to `a`, which the loop condition already used
    (the assignment targets had been renamed to `__magic_name__`).
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def lowerCAmelCase_ ( a : float , max_iter : int = 9999 , tolerance : float = 0.00_000_000_000_001 ) -> float:
    """Approximate sqrt(`a`) with the Newton-Raphson method.

    Parameter names restored — the original signature declared three
    parameters all named `_snake_case` (a SyntaxError). The helper calls
    (`get_initial_point`, `fx`, `fx_derivative`) referenced names lost in a
    refactor, so their one-line bodies are inlined here.

    Args:
        a: non-negative number whose square root is sought.
        max_iter: iteration cap for the Newton loop.
        tolerance: stop once successive iterates differ by less than this.

    Raises:
        ValueError: if `a` is negative.
    """
    if a < 0:
        raise ValueError("math domain error" )
    # Starting point: repeatedly square 2.0 until it exceeds `a`
    # (this is always >= sqrt(a), so Newton converges monotonically).
    value = 2.0
    while value <= a:
        value = math.pow(value, 2)
    for _ in range(max_iter):
        prev_value = value
        # Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n) with f(x) = x^2 - a
        value = value - (math.pow(value, 2) - a) / (2 * value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 281 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
snake_case : List[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
snake_case : Any = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
snake_case : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """ROUGE metric wrapping Google Research's `rouge_score` implementation.

    NOTE(review): both methods are named ``SCREAMING_SNAKE_CASE`` (originally
    ``_info`` / ``_compute``); the second shadows the first. The obfuscated
    names are kept to preserve the interface.
    """

    def SCREAMING_SNAKE_CASE ( self ):
        """Return the metric's metadata (features, citation, reference URLs)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def SCREAMING_SNAKE_CASE ( self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False ):
        """Compute ROUGE scores for paired predictions/references.

        Parameter names restored — the original signature declared five
        parameters all named ``_a`` (a SyntaxError); the body already used
        these names.

        Returns:
            Aggregated scores (when ``use_aggregator``) or a dict mapping each
            rouge type to the per-pair score list.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # Transpose list-of-dicts into dict-of-lists keyed by rouge type.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 281 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Map of submodule name -> public symbols, consumed by `_LazyModule` below.
# (The original assigned this to a throwaway `snake_case` name while the code
# below referenced `_import_structure`, which was never defined.)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes lazily.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 281 |
# Standard Base64 alphabet (RFC 4648): A-Z, a-z, 0-9, '+', '/'.
# NOTE(review): presumably named `B64_CHARSET` originally — the encode/decode
# functions below reference `B64_CHARSET`, which this name no longer provides.
snake_case : Optional[int] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCAmelCase_ ( _snake_case : bytes ) -> bytes:
    """Encode a bytes-like object to standard Base64 (RFC 4648) with '=' padding.

    Restores locals lost to an automated rename and fixes the self-comparing
    `isinstance(x, x)` type check. The alphabet is a local constant because
    the module-level `B64_CHARSET` name was renamed away.

    Raises:
        TypeError: if the argument is not `bytes`.
    """
    if not isinstance(_snake_case, bytes):
        msg = f'''a bytes-like object is required, not \'{_snake_case.__class__.__name__}\''''
        raise TypeError(msg)
    b64_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    # One "0"/"1" character per bit of input, 8 bits per byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in _snake_case)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            b64_charset[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def lowerCAmelCase_ ( _snake_case : str ) -> bytes:
    """Decode standard Base64 (bytes or ASCII str) back to the original bytes.

    Restores locals lost to an automated rename and fixes the self-comparing
    `isinstance(x, x)` type checks. Validation failures raise AssertionError,
    matching the original contract.

    Raises:
        TypeError: if the argument is neither bytes nor str.
        ValueError: if a bytes argument is not ASCII-decodable.
        AssertionError: on invalid characters or incorrect padding.
    """
    if not isinstance(_snake_case, bytes) and not isinstance(_snake_case, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{_snake_case.__class__.__name__}\''''
        )
        raise TypeError(msg)
    encoded_data = _snake_case
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    b64_charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in b64_charset for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in b64_charset for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one, and drop the 2 filler bits per '='.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )
    # Reassemble 8-bit groups into bytes.
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 281 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (presumably named `logger` originally).
snake_case : Tuple = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> config URL for MVP.
snake_case : Optional[int] = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _snake_case ( snake_case ):
    """Configuration class for the MVP model: stores architecture hyper-parameters.

    The class-attribute and parameter names are restored — the original
    assigned three different values to the same ``UpperCamelCase__`` attribute
    and declared 28 ``__init__`` parameters all named ``_a`` (a SyntaxError).
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy escape hatch: honor the deprecated flag by forcing BOS.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                "The config can simply be saved and uploaded again to be fixed." )
| 281 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _snake_case ( unittest.TestCase ):
    """Builds small RobertaPreLayerNorm configs and dummy tensors for the Flax tests.

    NOTE(review): `__init__` declares many parameters all named `_a` (a
    SyntaxError) and the `__magic_name__` assignment targets shadow the
    attribute/local names the bodies read — artifacts of an automated rename.
    Code is left byte-identical pending restoration.
    """

    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
        # Store the model/test hyper-parameters on the tester instance.
        __magic_name__ : List[Any] = parent
        __magic_name__ : Optional[Any] = batch_size
        __magic_name__ : Dict = seq_length
        __magic_name__ : Union[str, Any] = is_training
        __magic_name__ : Optional[Any] = use_attention_mask
        __magic_name__ : Optional[Any] = use_token_type_ids
        __magic_name__ : int = use_labels
        __magic_name__ : List[Any] = vocab_size
        __magic_name__ : Union[str, Any] = hidden_size
        __magic_name__ : Optional[Any] = num_hidden_layers
        __magic_name__ : int = num_attention_heads
        __magic_name__ : Any = intermediate_size
        __magic_name__ : List[Any] = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[Any] = max_position_embeddings
        __magic_name__ : Tuple = type_vocab_size
        __magic_name__ : List[str] = type_sequence_label_size
        __magic_name__ : Dict = initializer_range
        __magic_name__ : List[Any] = num_choices

    def SCREAMING_SNAKE_CASE ( self ):
        """Create a config plus random (input_ids, token_type_ids, attention_mask)."""
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : List[Any] = None
        if self.use_attention_mask:
            __magic_name__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ : str = None
        if self.use_token_type_ids:
            __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ : List[str] = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def SCREAMING_SNAKE_CASE ( self ):
        """Package the prepared config and inputs as (config, inputs_dict)."""
        __magic_name__ : int = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = config_and_inputs
        __magic_name__ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE ( self ):
        """Prepare inputs for decoder tests, adding encoder hidden states and mask."""
        __magic_name__ : Optional[int] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = config_and_inputs
        __magic_name__ : Tuple = True
        __magic_name__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _snake_case ( snake_case , unittest.TestCase ):
    """Common-model-test suite for all Flax RobertaPreLayerNorm head classes.

    NOTE(review): the two `UpperCamelCase__` assignments shadow each other, and
    the method bodies reference names (`FlaxRobertaPreLayerNormModelTester`,
    `_a`) lost in an automated rename. Code left byte-identical pending
    restoration.
    """

    UpperCamelCase__ = True
    UpperCamelCase__ = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def SCREAMING_SNAKE_CASE ( self ):
        """Instantiate the shared model tester for this suite."""
        __magic_name__ : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Smoke-test: each class loads from the PT checkpoint and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            __magic_name__ : Optional[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
            __magic_name__ : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class _snake_case ( unittest.TestCase ):
    """Integration tests comparing model outputs against reference value slices.

    NOTE(review): the `__magic_name__` assignment targets shadow the local
    names (`model`, `input_ids`, `output`, `expected_*`) the bodies read, and
    `_a` is undefined — artifacts of an automated rename. Code left
    byte-identical pending restoration.
    """

    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Masked-LM head: check output shape and a 3x3 logits slice."""
        __magic_name__ : Dict = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Union[str, Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : List[str] = model(_a )[0]
        __magic_name__ : str = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , _a )
        # compare the actual values for a slice.
        __magic_name__ : List[str] = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Base model: check a 3x3 slice of the last hidden states."""
        __magic_name__ : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Tuple = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : Tuple = model(_a )[0]
        # compare the actual values for a slice.
        __magic_name__ : Dict = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 281 | 1 |
# Base of the rolling hash: size of the extended-ASCII alphabet.
# NOTE(review): presumably named `alphabet_size` originally — the matcher below
# references `alphabet_size`/`modulus`, which these names no longer provide.
snake_case : str = 256
# Modulus to hash a string
snake_case : Dict = 1_000_003
def lowerCAmelCase_ ( pattern : str , text : str ) -> bool:
    """Return True if `pattern` occurs as a substring of `text` (Rabin-Karp).

    Parameter names restored — the original signature declared two parameters
    both named `_snake_case` (a SyntaxError). The hash base and modulus are
    local constants because the module-level names were renamed away.
    """
    # Base (extended-ASCII alphabet size) and prime-ish modulus of the rolling hash.
    alphabet_size = 256
    modulus = 1_000_003

    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # highest-order digit weight, used when rolling the window forward
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        # Hash match is only a candidate; confirm with a direct comparison.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def lowerCAmelCase_ ( ) -> None:
    """Exercise the Rabin-Karp matcher on five hand-picked cases.

    NOTE(review): the body references `rabin_karp` and `_snake_case`, neither
    of which is defined here (the matcher above was renamed and the local
    assignments were renamed to `__magic_name__`), so this raises NameError as
    written. Code left byte-identical pending restoration.
    """
    # Test 1) pattern present in first text, absent in second
    __magic_name__ : List[Any] = "abc1abc12"
    __magic_name__ : int = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    __magic_name__ : List[str] = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
    # Test 2)
    __magic_name__ : str = "ABABX"
    __magic_name__ : Optional[int] = "ABABZABABYABABX"
    assert rabin_karp(_snake_case , _snake_case )
    # Test 3)
    __magic_name__ : List[Any] = "AAAB"
    __magic_name__ : Tuple = "ABAAAAAB"
    assert rabin_karp(_snake_case , _snake_case )
    # Test 4)
    __magic_name__ : Optional[Any] = "abcdabcy"
    __magic_name__ : Union[str, Any] = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(_snake_case , _snake_case )
    # Test 5) non-ASCII characters
    __magic_name__ : Any = "Lü"
    __magic_name__ : List[Any] = "Lüsai"
    assert rabin_karp(_snake_case , _snake_case )
    __magic_name__ : Dict = "Lue"
    assert not rabin_karp(_snake_case , _snake_case )
    print("Success." )
if __name__ == "__main__":
    # NOTE(review): `test_rabin_karp` is not defined in this file (the driver
    # above was renamed to `lowerCAmelCase_`); running as a script raises NameError.
    test_rabin_karp()
| 281 |
def lowerCAmelCase_ ( _snake_case : list[list[int | float]] ) -> int:
'''simple docstring'''
__magic_name__ : Any = len(_snake_case )
__magic_name__ : Optional[Any] = len(matrix[0] )
__magic_name__ : Union[str, Any] = min(_snake_case , _snake_case )
for row in range(_snake_case ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _snake_case ):
__magic_name__ : Optional[Any] = matrix[col][row] / matrix[row][row]
for i in range(_snake_case , _snake_case ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__magic_name__ : str = True
for i in range(row + 1 , _snake_case ):
if matrix[i][row] != 0:
__magic_name__ , __magic_name__ : List[str] = matrix[i], matrix[row]
__magic_name__ : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(_snake_case ):
__magic_name__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case(DiffusionPipeline):
    """Unconditional image generation with the stochastic sampler of
    Karras et al. (2022), including the second-order correction step.

    [1] "Elucidating the Design Space of Diffusion-Based Generative Models"
        (https://arxiv.org/abs/2206.00364), Algorithm 2 and eq. (213).
    """

    # Components registered by __init__ via register_modules.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=50,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Generate `batch_size` images.

        Returns an ImagePipelineOutput, or a 1-tuple when return_dict=False.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 281 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Regex used by normalize_answer() to strip standalone English articles.
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

# Parsed command-line options; populated in the __main__ guard below.
OPTS = None
def parse_args():
    """Parse the evaluation script's command-line arguments.

    Prints help and exits with status 1 when no arguments are given.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).",
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id in `dataset` to True iff it has a gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                # Empty answer-text list marks an unanswerable question.
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        # ARTICLES_REGEX is a module-level compiled pattern for a|an|the.
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Whitespace-tokenize `s` after SQuAD normalization; [] for empty/None."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized prediction equals the normalized gold, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers (in [0, 1])."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    # Multiset intersection counts each shared token with its multiplicity.
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute per-qid exact-match and F1 scores for all predictions.

    Returns (exact_scores, f1_scores), each a dict keyed by question id.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                # Drop gold answers that normalize to the empty string.
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Replace the score of any question the model deems unanswerable.

    When the no-answer probability exceeds the threshold, credit is given
    only if the question truly has no answer.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into percentage metrics.

    When `qid_list` is given, only those question ids are aggregated.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under `prefix`_key."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a step-style precision-recall curve to `out_image`.

    NOTE(review): relies on the module-global `plt`, which is only imported
    in the __main__ guard when --out-image-dir is set.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision (and optionally plot the PR curve).

    Questions are visited in order of ascending no-answer probability; each
    distinct probability value is a candidate threshold contributing one
    precision/recall point.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves for exact/F1/oracle scores and merge APs into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        # No answerable questions: recall is undefined, skip the analysis.
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: score 1 exactly for the questions that truly have answers.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given qids.

    NOTE(review): relies on the module-global `plt`, only imported in the
    __main__ guard when --out-image-dir is set.
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of the dataset).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold that maximizes the total score.

    Returns (best score as a percentage of len(scores), best threshold).
    """
    # Starting point: predict no-answer for everything.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Non-empty prediction on an unanswerable question costs a point.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best thresholded exact/F1 scores and thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Run the SQuAD 2.0 evaluation end to end using the global OPTS."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without model estimates, treat every question as answerable.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        # Non-interactive backend so curves can be saved headlessly.
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 281 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazily-exposed submodules; the tokenizer is only available when
# sentencepiece is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_swa"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 281 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Sentencepiece's word-boundary marker, used in expected-token fixtures below.
SPIECE_UNDERLINE = "▁"

# Path to the small sentencepiece fixture model used for offline tests.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BigBird (slow and fast tokenizers)."""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Build a tokenizer from the offline sentencepiece fixture.
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`_convert_token_to_id` and `_convert_id_to_token` round-trip."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        # Unknown pieces ("9", "é") come back as <unk> after the round-trip.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        """Decoding keeps [MASK] glued to the preceding token."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 281 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCAmelCase_(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information-gain) pairs and save them to `igf_data_file`."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def lowerCAmelCase_(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the IGF secondary learner and return it."""
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def lowerCAmelCase_(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune a GPT-2 LM, optionally filtering batches with the IGF
    secondary learner; returns the fine-tuned model."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            # Random window of `context_len` tokens from the example.
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def lowerCAmelCase_ ( ) -> Optional[int]:
    """Entry point for the IGF (Information Gain Filtration) example.

    Pipeline: collect (context, IG(X)) pairs, train the secondary learner on
    them, load GPT-2, build train/test datasets, then fine-tune GPT-2 while
    the secondary learner filters uninformative batches.

    NOTE(review): the original body bound every intermediate to the throwaway
    name ``__magic_name__`` and then read undefined names such as ``parser``
    (NameError on first call); coherent local names are restored below.
    ``parser.parse_args()`` is never called -- the pipeline runs on the
    hard-coded values shown, matching the upstream example -- TODO confirm
    whether the CLI arguments should actually be consumed.
    """
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        # NOTE(review): upstream passes its `recopy_gpt2` helper as the default
        # here; that name is not visible in this chunk, so None is used -- confirm.
        default=None,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    # NOTE(review): upstream also passes recopy_model=recopy_gpt2; that helper
    # is not visible in this chunk, so the callee's default is relied on here.
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
    # BUG FIX: the entry point in this module is named `lowerCAmelCase_`, but
    # the guard invoked the undefined name `main()` (NameError). Call the
    # function that actually exists.
    lowerCAmelCase_()
| 281 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Shared module logger (standard HF pattern).
snake_case : int = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same name `snake_case`,
# so each assignment clobbers the previous one and only the final value
# ("▁") survives at runtime, while the class below reads names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) that are never
# defined here. Looks like an automated rename gone wrong -- confirm
# against the canonical ALBERT tokenizer module.
# Intended: VOCAB_FILES_NAMES -- logical vocab file -> on-disk filename.
snake_case : List[str] = {"vocab_file": "spiece.model"}
# Intended: PRETRAINED_VOCAB_FILES_MAP -- download URL of the SentencePiece
# model for each pretrained checkpoint.
snake_case : List[str] = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}
# Intended: PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES -- max input length per
# checkpoint.
snake_case : Tuple = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
# Intended: SPIECE_UNDERLINE -- SentencePiece's word-boundary marker.
snake_case : List[str] = "▁"
class _snake_case ( snake_case ):
    """SentencePiece-based ALBERT tokenizer (name-mangled copy).

    NOTE(review): names in this class were damaged by an automated rename:
    every method is called ``SCREAMING_SNAKE_CASE`` (later definitions shadow
    earlier ones on the class), every local is bound to ``__magic_name__``
    while the bodies *read* the intended names, and ``__init__`` declares
    thirteen parameters all named ``_a`` -- a SyntaxError.  The comments
    below describe the intended behavior of each method, mirroring the
    canonical AlbertTokenizer; confirm against that module before relying
    on them.
    """

    # Intended class attributes: vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes (all three rebind the same mangled name).
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Intended parameters: vocab_file, do_lower_case=True, remove_space=True,
    # keep_accents=False, bos/eos/unk/sep/pad/cls/mask tokens, sp_model_kwargs.
    def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __magic_name__ : str = (
            AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
            if isinstance(_a , _a )
            else mask_token
        )
        __magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        __magic_name__ : Dict = do_lower_case
        __magic_name__ : Tuple = remove_space
        __magic_name__ : Union[str, Any] = keep_accents
        __magic_name__ : Tuple = vocab_file
        # Load the SentencePiece model from the vocab file.
        __magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
    # Intended: vocab_size property -- number of pieces in the SP model.
    @property
    def SCREAMING_SNAKE_CASE ( self ):
        return len(self.sp_model )
    # Intended: get_vocab -- token -> id mapping, including added tokens.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # Pickling support: SentencePieceProcessor is not picklable, so it is
    # dropped here and re-loaded in __setstate__.
    def __getstate__( self ):
        __magic_name__ : List[str] = self.__dict__.copy()
        __magic_name__ : Any = None
        return state
    def __setstate__( self , _a ):
        __magic_name__ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __magic_name__ : str = {}
        __magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    # Intended: preprocess_text -- collapse whitespace, normalize `` '' quotes
    # to ", optionally strip accents via NFKD, optionally lowercase.
    def SCREAMING_SNAKE_CASE ( self , _a ):
        if self.remove_space:
            __magic_name__ : List[Any] = " ".join(inputs.strip().split() )
        else:
            __magic_name__ : str = inputs
        __magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            __magic_name__ : str = unicodedata.normalize("NFKD" , _a )
            __magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
        if self.do_lower_case:
            __magic_name__ : int = outputs.lower()
        return outputs
    # Intended: _tokenize -- SentencePiece-encode the preprocessed text, with
    # special re-splitting of pieces that end in a digit followed by ",".
    def SCREAMING_SNAKE_CASE ( self , _a ):
        __magic_name__ : Optional[Any] = self.preprocess_text(_a )
        __magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
        __magic_name__ : Any = []
        for piece in pieces:
            if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                # Re-encode "9," style pieces without the trailing comma so the
                # digits and the comma become separate pieces.
                __magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        __magic_name__ : List[str] = cur_pieces[1:]
                    else:
                        __magic_name__ : Optional[int] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(_a )
            else:
                new_pieces.append(_a )
        return new_pieces
    # Intended: _convert_token_to_id via the SP model.
    def SCREAMING_SNAKE_CASE ( self , _a ):
        return self.sp_model.PieceToId(_a )
    # Intended: _convert_id_to_token via the SP model.
    def SCREAMING_SNAKE_CASE ( self , _a ):
        return self.sp_model.IdToPiece(_a )
    # Intended: convert_tokens_to_string -- decode runs of ordinary pieces with
    # the SP model, passing special tokens through verbatim.
    def SCREAMING_SNAKE_CASE ( self , _a ):
        __magic_name__ : Any = []
        __magic_name__ : Union[str, Any] = ""
        __magic_name__ : int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                __magic_name__ : List[Any] = True
                __magic_name__ : Optional[int] = []
            else:
                current_sub_tokens.append(_a )
                __magic_name__ : Optional[Any] = False
        out_string += self.sp_model.decode(_a )
        return out_string.strip()
    # Intended: build_inputs_with_special_tokens --
    # [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        __magic_name__ : List[str] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    # Intended: get_special_tokens_mask -- 1 for special tokens, 0 otherwise.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is not None:
            return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1]
    # Intended: create_token_type_ids_from_sequences -- 0s for the first
    # segment (incl. [CLS]/[SEP]), 1s for the second.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        __magic_name__ : Optional[int] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    # Intended: save_vocabulary -- copy the SP model file into save_directory
    # (or serialize it if the source file is gone) and return its path.
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : List[str] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , "wb" ) as fi:
                __magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
| 281 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase_ ( lowercase ):
    """Agent tool that captions an image with the BLIP base checkpoint.

    NOTE(review): names were mangled by an automated rename -- the three
    identically-named ``__lowerCAmelCase`` methods shadow one another
    (intended: the PipelineTool encode / forward / decode split), and
    ``__init__`` declares ``*__UpperCAmelCase`` and ``**__UpperCAmelCase``
    with the same name, which is a SyntaxError.  ``skip_special_tokens=``
    being passed the method argument also looks like rename damage
    (presumably it should be True) -- confirm against the original tool.
    """

    # Checkpoint, natural-language description, tool name, model class and
    # declared input/output modalities used by the agent framework.
    __snake_case = '''Salesforce/blip-image-captioning-base'''
    __snake_case = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    __snake_case = '''image_captioner'''
    __snake_case = AutoModelForVisionaSeq
    __snake_case = ['''image''']
    __snake_case = ['''text''']
    def __init__( self : Optional[int] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) ->Optional[Any]:
        """Verify the vision backend is installed, then defer to PipelineTool."""
        requires_backends(self , ['''vision'''] )
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
    def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : "Image" ) ->Union[str, Any]:
        """Encode: preprocess the input image into PyTorch tensors."""
        return self.pre_processor(images=__UpperCAmelCase , return_tensors='''pt''' )
    def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : str ) ->List[str]:
        """Forward: run generation on the encoded inputs."""
        return self.model.generate(**__UpperCAmelCase )
    def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] ) ->int:
        """Decode: turn the generated ids into the caption string."""
        return self.pre_processor.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )[0].strip()
| 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowerCAmelCase_(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) every sequence to ``sequence_length`` and return a list.

    Args:
        sequences: iterable of 1-D sequences (lists/arrays) to pad.
        padding_value: fill value; a tuple means each element is a pair, so
            the output gains a trailing dimension of size 2 (used for spans).
        padding_side: "right" pads at the end; anything else pads at the front.
        sequence_length: target length of every padded sequence.

    Returns:
        A nested Python list of shape (len(sequences), sequence_length[, 2]).

    NOTE(review): the original declared four parameters all named
    ``_snake_case`` (a SyntaxError) and its loop assigned each padded slice
    to a throwaway local instead of writing into ``out_tensor``, so it could
    never produce padded output; the slice assignments are restored here.
    """
    if isinstance(padding_value, tuple):
        # Pair-valued padding (e.g. entity spans): output is (n, L, 2).
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(trimmed) :, :2] = trimmed
            else:
                out_tensor[i, sequence_length - len(trimmed) :] = trimmed
    return out_tensor.tolist()
def lowerCAmelCase_(char):
    """Return True if ``char`` (a single character) is punctuation.

    Treats the four ASCII ranges of printable non-alphanumerics as
    punctuation (``!``-``/``, ``:``-``@``, ``[``-`` ` ``, ``{``-``~``), plus
    any character whose Unicode category starts with "P".

    NOTE(review): the original stored ``ord(char)`` and the Unicode category
    into a throwaway local and then read the undefined names ``cp`` / ``cat``
    (NameError); the local names are restored here.
    """
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class _snake_case ( snake_case ):
    """Collator that pads LUKE-style token-classification features into
    rectangular batches (tokens, labels, ner_tags, original_entity_spans).

    NOTE(review): the dataclass fields were mangled -- all six share the name
    ``UpperCamelCase__`` so only the last binding survives.  By analogy with
    DataCollatorForLukeTokenClassification the intended fields are:
    tokenizer, padding=True, max_length=None, pad_to_multiple_of=None,
    label_pad_token_id=-100, return_tensors="pt".  Confirm upstream.
    """

    UpperCamelCase__ = 42
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    UpperCamelCase__ = None
    UpperCamelCase__ = -100
    UpperCamelCase__ = "pt"
    # Intended: torch_call -- pad the tokenizer features, then manually pad
    # labels / ner_tags / original_entity_spans and tensorize everything.
    def SCREAMING_SNAKE_CASE ( self , _a ):
        import torch
        __magic_name__ : List[str] = "label" if "label" in features[0].keys() else "labels"
        __magic_name__ : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Let the tokenizer pad its own fields; only request tensors now when
        # there are no labels left to pad by hand.
        __magic_name__ : Optional[int] = self.tokenizer.pad(
            _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        # Target length is taken from the already-padded entity_ids column.
        __magic_name__ : Dict = torch.tensor(batch["entity_ids"] ).shape[1]
        __magic_name__ : List[Any] = self.tokenizer.padding_side
        if padding_side == "right":
            __magic_name__ : str = [
                list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
            ]
        else:
            __magic_name__ : int = [
                [self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
            ]
        __magic_name__ : Dict = [feature["ner_tags"] for feature in features]
        __magic_name__ : List[Any] = padding_tensor(_a , -1 , _a , _a )
        __magic_name__ : Any = [feature["original_entity_spans"] for feature in features]
        __magic_name__ : Any = padding_tensor(_a , (-1, -1) , _a , _a )
        # NOTE(review): `torch.intaa` does not exist -- presumably torch.int64
        # before the automated rename; confirm.
        __magic_name__ : List[Any] = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 281 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module-level logger (HF convention); mangled name, intended: `logger`.
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
    """CLAP audio feature extractor: converts raw mono audio into log-mel
    spectrogram "input_features" plus a per-sample "is_longer" flag.

    NOTE(review): names were damaged by an automated rename -- every local is
    bound to ``UpperCAmelCase_`` while the bodies read the intended names
    (top_db, truncation, ...), and ``__init__`` declares a dozen parameters
    all named ``__a``, which is a SyntaxError.  The comments below describe
    the intended behavior, mirroring ClapFeatureExtractor; confirm upstream.
    """

    a__ : Optional[int] = ["""input_features""", """is_longer"""]
    # Intended parameters: feature_size=64, sampling_rate=48000, hop_length=480,
    # max_length_s=10, fft_window_size=1024, padding_value=0.0,
    # return_attention_mask=False, frequency_min=0, frequency_max=14000,
    # top_db=None, truncation="fusion", padding="repeatpad".
    def __init__(self : Tuple , __a : Optional[int]=64 , __a : Union[str, Any]=48000 , __a : str=480 , __a : Optional[int]=10 , __a : List[Any]=1024 , __a : Optional[Any]=0.0 , __a : Tuple=False , __a : float = 0 , __a : float = 14000 , __a : int = None , __a : str = "fusion" , __a : str = "repeatpad" , **__a : Any , ):
        super().__init__(
            feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , )
        UpperCAmelCase_ = top_db
        UpperCAmelCase_ = truncation
        UpperCAmelCase_ = padding
        UpperCAmelCase_ = fft_window_size
        # Number of usable FFT bins: fft_window_size // 2 + 1.
        UpperCAmelCase_ = (fft_window_size >> 1) + 1
        UpperCAmelCase_ = hop_length
        UpperCAmelCase_ = max_length_s
        UpperCAmelCase_ = max_length_s * sampling_rate
        UpperCAmelCase_ = sampling_rate
        UpperCAmelCase_ = frequency_min
        UpperCAmelCase_ = frequency_max
        # Two mel filter banks: HTK-style (used by "fusion") ...
        UpperCAmelCase_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm=__a , mel_scale="htk" , )
        # ... and Slaney-style (used by the non-fusion paths).
        UpperCAmelCase_ = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm="slaney" , mel_scale="slaney" , )
    # Intended: to_dict -- serializable config, dropping the large filter banks.
    def _lowercase (self : List[str] ):
        UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
        UpperCAmelCase_ = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    # Intended: _np_extract_fbank_features -- power spectrogram with a Hann
    # window projected onto the given mel filters, in dB, transposed to
    # (frames, mel bins).
    def _lowercase (self : str , __a : np.array , __a : Optional[np.array] = None ):
        UpperCAmelCase_ = spectrogram(
            __a , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__a , log_mel="dB" , )
        return log_mel_spectrogram.T
    # Intended: _random_mel_fusion -- stack a bilinear-shrunk copy of the full
    # mel with three random chunks (front / middle / back thirds).
    def _lowercase (self : List[Any] , __a : Dict , __a : Optional[Any] , __a : Tuple ):
        UpperCAmelCase_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            UpperCAmelCase_ = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            UpperCAmelCase_ = [0]
        # randomly choose index for each part
        UpperCAmelCase_ = np.random.choice(ranges[0] )
        UpperCAmelCase_ = np.random.choice(ranges[1] )
        UpperCAmelCase_ = np.random.choice(ranges[2] )
        UpperCAmelCase_ = mel[idx_front : idx_front + chunk_frames, :]
        UpperCAmelCase_ = mel[idx_middle : idx_middle + chunk_frames, :]
        UpperCAmelCase_ = mel[idx_back : idx_back + chunk_frames, :]
        UpperCAmelCase_ = torch.tensor(mel[None, None, :] )
        UpperCAmelCase_ = torch.nn.functional.interpolate(
            __a , size=[chunk_frames, 64] , mode="bilinear" , align_corners=__a )
        UpperCAmelCase_ = mel_shrink[0][0].numpy()
        UpperCAmelCase_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    # Intended: _get_input_mel -- truncate ("rand_trunc" or "fusion") or pad
    # ("repeat"/"repeatpad") a waveform, returning (mel features, longer flag).
    def _lowercase (self : Optional[Any] , __a : np.array , __a : Any , __a : List[str] , __a : Optional[Any] ):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                UpperCAmelCase_ = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                UpperCAmelCase_ = len(__a ) - max_length
                UpperCAmelCase_ = np.random.randint(0 , overflow + 1 )
                UpperCAmelCase_ = waveform[idx : idx + max_length]
                UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters )
                UpperCAmelCase_ = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                UpperCAmelCase_ = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    UpperCAmelCase_ = np.stack([mel, mel, mel, mel] , axis=0 )
                    UpperCAmelCase_ = False
                else:
                    UpperCAmelCase_ = self._random_mel_fusion(__a , __a , __a )
                    UpperCAmelCase_ = True
            else:
                raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
        else:
            UpperCAmelCase_ = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    UpperCAmelCase_ = int(max_length / len(__a ) )
                    UpperCAmelCase_ = np.stack(np.tile(__a , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    UpperCAmelCase_ = int(max_length / len(__a ) )
                    UpperCAmelCase_ = np.stack(np.tile(__a , __a ) )
                UpperCAmelCase_ = np.pad(__a , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
            if truncation == "fusion":
                UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters )
                UpperCAmelCase_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    # Intended: __call__ -- validate sampling rate, normalize raw_speech to a
    # batch of float32 arrays, extract mel features per waveform and return a
    # BatchFeature with "input_features" and "is_longer".
    def __call__(self : Any , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : str = None , __a : Optional[str] = None , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , **__a : int , ):
        UpperCAmelCase_ = truncation if truncation is not None else self.truncation
        UpperCAmelCase_ = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        UpperCAmelCase_ = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        UpperCAmelCase_ = is_batched_numpy or (
            isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCAmelCase_ = [np.asarray(__a , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__a , np.ndarray ):
            UpperCAmelCase_ = np.asarray(__a , dtype=np.floataa )
        elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase_ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase_ = [np.asarray(__a )]
        # convert to mel spectrogram, truncate and pad if needed.
        UpperCAmelCase_ = [
            self._get_input_mel(__a , max_length if max_length else self.nb_max_samples , __a , __a )
            for waveform in raw_speech
        ]
        UpperCAmelCase_ = []
        UpperCAmelCase_ = []
        for mel, longer in padded_inputs:
            input_mel.append(__a )
            is_longer.append(__a )
        if truncation == "fusion" and sum(__a ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            UpperCAmelCase_ = np.random.randint(0 , len(__a ) )
            UpperCAmelCase_ = True
        if isinstance(input_mel[0] , __a ):
            UpperCAmelCase_ = [np.asarray(__a , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        UpperCAmelCase_ = [[longer] for longer in is_longer]
        UpperCAmelCase_ = {"input_features": input_mel, "is_longer": is_longer}
        UpperCAmelCase_ = BatchFeature(__a )
        if return_tensors is not None:
            UpperCAmelCase_ = input_features.convert_to_tensors(__a )
        return input_features
| 1 |
import math
def lowerCAmelCase_(x: float, a: float) -> float:
    """Evaluate f(x) = x**2 - a, whose positive root is sqrt(a).

    NOTE(review): both parameters were originally named ``_snake_case`` (a
    SyntaxError) while the body read ``a``; restored to x and a.
    """
    return math.pow(x, 2) - a
def lowerCAmelCase_(x: float) -> float:
    """Derivative of f(x) = x**2 - a with respect to x, i.e. 2*x.

    NOTE(review): the body read the undefined name ``x`` while the parameter
    was ``_snake_case`` (NameError); the parameter name is restored.
    """
    return 2 * x
def lowerCAmelCase_(a: float) -> float:
    """Return a starting guess >= sqrt(a) by repeatedly squaring 2.0.

    NOTE(review): the original assigned the squared value to a throwaway
    local and compared the undefined names ``start`` / ``a`` (NameError, and
    the loop variable was never updated); restored so the loop terminates.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def lowerCAmelCase_(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    """Return sqrt(a) via the Newton-Raphson method on f(x) = x**2 - a.

    Args:
        a: non-negative number to take the square root of.
        max_iter: iteration cap before returning the current estimate.
        tolerance: stop when successive estimates differ by less than this.

    Raises:
        ValueError: if ``a`` is negative ("math domain error").

    NOTE(review): the original declared three parameters all named
    ``_snake_case`` (SyntaxError), bound its locals to a throwaway name and
    then read undefined ``value`` / ``prev_value``.  The helper functions it
    called (fx, fx_derivative, get_initial_point) are all also named
    ``lowerCAmelCase_`` in this module and shadow one another, so their math
    is inlined here to keep the function self-contained and runnable.
    """
    if a < 0:
        raise ValueError("math domain error")
    # Initial guess: repeatedly square 2.0 until it exceeds a.
    value = 2.0
    while value <= a:
        value = math.pow(value, 2)
    for _ in range(max_iter):
        prev_value = value
        # Newton step for f(x) = x^2 - a:  x <- x - f(x) / f'(x)
        value = value - (math.pow(value, 2) - a) / (2 * value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 281 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
# Module-level logger (stdlib logging; mangled name, intended: `logger`).
lowerCamelCase : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase (lowercase_ ):
    """Seq2Seq-specific TrainingArguments: adds sortish sampling, generation
    during evaluation, and generation length / beam / config overrides.

    NOTE(review): this class was mangled by an automated rename -- the base
    class and every ``default=`` below are the undefined name ``lowercase_``
    (intended base: TrainingArguments, with literal defaults), and all fields
    share the name ``lowerCAmelCase__`` so only the last binding survives.
    Only the ``to_dict`` override is restored to working code here; the field
    declarations are left untouched pending confirmation against
    Seq2SeqTrainingArguments upstream.
    """

    lowerCAmelCase__ : bool = field(default=lowercase_ , metadata={"""help""": """Whether to use SortishSampler or not."""} )
    lowerCAmelCase__ : bool = field(
        default=lowercase_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    lowerCAmelCase__ : Optional[int] = field(
        default=lowercase_ , metadata={
            """help""": (
                """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `max_length` value of the model configuration."""
            )
        } , )
    lowerCAmelCase__ : Optional[int] = field(
        default=lowercase_ , metadata={
            """help""": (
                """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `num_beams` value of the model configuration."""
            )
        } , )
    lowerCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
        default=lowercase_ , metadata={
            """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
        } , )
    def UpperCamelCase__ (self : Dict ):
        """Serialize to a dict, converting any nested ``GenerationConfig``
        value to a plain dict so the result is JSON-serializable.

        NOTE(review): the original stored ``super().to_dict()`` into a
        throwaway local, then iterated the undefined name ``d`` and compared
        against undefined ``UpperCamelCase`` (NameError).  Restored using
        ``GenerationConfig`` as the sentinel type, matching
        Seq2SeqTrainingArguments.to_dict upstream.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 2 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _snake_case :
    """Builds tiny LED configs and inputs for the TF-LED model tests.

    NOTE(review): automated rename artifacts throughout -- locals are bound
    to ``__magic_name__`` (one tuple unpack even binds the same name twice)
    while the bodies read the intended names (parent, batch_size, ...), and
    all ``__init__`` parameters are named ``_a``, which is a SyntaxError.
    Comments describe the intended behavior of the canonical
    TFLEDModelTester; confirm upstream.
    """

    # Intended attributes: config_cls=LEDConfig, config_updates={},
    # hidden_act="gelu" (all three rebind the same mangled name).
    UpperCamelCase__ = LEDConfig
    UpperCamelCase__ = {}
    UpperCamelCase__ = 'gelu'
    # Intended parameters: parent, batch_size=13, seq_length=7, is_training,
    # use_labels, vocab_size=99, hidden_size=32, num_hidden_layers=2,
    # num_attention_heads=4, intermediate_size=37, dropout probs, max position
    # embeddings, eos/pad/bos token ids, attention_window=4.
    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ):
        __magic_name__ : int = parent
        __magic_name__ : Optional[int] = batch_size
        __magic_name__ : Tuple = seq_length
        __magic_name__ : List[Any] = is_training
        __magic_name__ : Dict = use_labels
        __magic_name__ : Optional[Any] = vocab_size
        __magic_name__ : int = hidden_size
        __magic_name__ : Optional[int] = num_hidden_layers
        __magic_name__ : Optional[int] = num_attention_heads
        __magic_name__ : Tuple = intermediate_size
        __magic_name__ : Any = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[str] = max_position_embeddings
        __magic_name__ : Any = eos_token_id
        __magic_name__ : str = pad_token_id
        __magic_name__ : int = bos_token_id
        __magic_name__ : Optional[int] = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        __magic_name__ : Tuple = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        __magic_name__ : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    # Intended: prepare_config_and_inputs_for_common -- random ids with a
    # forced eos, a tiny LEDConfig, the standard inputs dict and a global
    # attention mask on the last token.
    def SCREAMING_SNAKE_CASE ( self ):
        __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __magic_name__ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __magic_name__ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : Dict = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        __magic_name__ : List[str] = prepare_led_inputs_dict(_a , _a , _a )
        __magic_name__ : Union[str, Any] = tf.concat(
            [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
        __magic_name__ : List[Any] = global_attention_mask
        return config, inputs_dict
    # Intended: check_decoder_model_past_large_inputs -- verify that decoding
    # with cached past_key_values matches a full forward pass on a random slice.
    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        __magic_name__ : Dict = TFLEDModel(config=_a ).get_decoder()
        __magic_name__ : Optional[int] = inputs_dict["input_ids"]
        __magic_name__ : Union[str, Any] = input_ids[:1, :]
        __magic_name__ : str = inputs_dict["attention_mask"][:1, :]
        __magic_name__ : int = 1
        # first forward pass
        __magic_name__ : Tuple = model(_a , attention_mask=_a , use_cache=_a )
        __magic_name__ , __magic_name__ : str = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __magic_name__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __magic_name__ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __magic_name__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        __magic_name__ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __magic_name__ : List[str] = model(_a , attention_mask=_a )[0]
        __magic_name__ : Dict = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __magic_name__ : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __magic_name__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
        __magic_name__ : List[str] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build the standard kwargs dict for a TF-LED forward pass.

    Any mask not supplied is derived from the token ids (attend to every
    non-padding position) or defaults to all-ones head masks.

    NOTE(review): reconstructed from mangled source — the original had seven
    parameters all named identically (a SyntaxError) and a nonexistent
    ``tf.inta`` dtype; the signature was restored from the names used in the
    body and the call sites in this file (``prepare_led_inputs_dict``).
    """
    if attention_mask is None:
        # attend to every non-padding token
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # always attend to the first decoder token, then to non-padding tokens
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    """Common test-suite class for TF-LED models.

    NOTE(review): this block is machine-mangled — every method shares the
    name ``SCREAMING_SNAKE_CASE`` (later definitions overwrite earlier ones
    at class-creation time), several class attributes share the name
    ``UpperCamelCase__``, and ``_a``/``__magic_name__`` stand in for many
    distinct original identifiers, so most name references below are
    unresolved at runtime. Comments describe the apparent intent; confirm
    against the upstream ``test_modeling_tf_led.py``.
    """

    # model classes exercised by the common tests (attribute names collide)
    UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    # pipeline-task -> model-class mapping used by the pipeline mixin
    UpperCamelCase__ = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase__ = True
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE ( self ):
        # set-up: build the model tester and the config tester
        # (`_a` is unresolved here — presumably the LED config class)
        __magic_name__ : Dict = TFLEDModelTester(self )
        __magic_name__ : List[Any] = ConfigTester(self , config_class=_a )

    def SCREAMING_SNAKE_CASE ( self ):
        # run the shared config sanity checks
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self ):
        # exercise decoder past-key-values handling via the model tester
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )

    def SCREAMING_SNAKE_CASE ( self ):
        # attention-output checks: verifies counts and shapes of decoder,
        # encoder and global attentions under various output settings
        __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : List[str] = tf.zeros_like(inputs_dict["attention_mask"] )
        __magic_name__ : Optional[Any] = 2
        # mark the first positions as global-attention tokens
        __magic_name__ : Tuple = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        __magic_name__ : Any = True
        __magic_name__ : str = self.model_tester.seq_length
        __magic_name__ : Dict = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(_a ):
            # decoder attentions: one tensor per layer, (heads, seq, seq)
            __magic_name__ : str = outputs.decoder_attentions
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(_a ):
            # encoder attentions and global attentions: one per layer each
            __magic_name__ : Any = [t.numpy() for t in outputs.encoder_attentions]
            __magic_name__ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = False
            __magic_name__ : Tuple = False
            __magic_name__ : Optional[int] = model_class(_a )
            __magic_name__ : str = model(self._prepare_for_class(_a , _a ) )
            __magic_name__ : Any = len(_a )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )

            if self.is_encoder_decoder:
                __magic_name__ : Tuple = model_class(_a )
                __magic_name__ : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
                self.assertEqual(config.output_hidden_states , _a )
                check_decoder_attentions_output(_a )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __magic_name__ : Dict = True
            __magic_name__ : str = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )

            # Check attention is always last and order is fine
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
            self.assertEqual(model.config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def SCREAMING_SNAKE_CASE ( self ):
        pass

    def SCREAMING_SNAKE_CASE ( self ):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """Return *tok_lst* as a ``tf.int32`` constant (token-id tensor).

    NOTE(review): restored from mangled source — the function is called as
    ``_long_tensor`` at its two call sites below, and ``tf.intaa`` is not a
    real TensorFlow dtype (mangled ``tf.int32``).
    """
    return tf.constant(tok_lst, dtype=tf.int32)
# Numeric tolerance used by the slow integration checks below
# (mangled constant name; upstream calls this TOLERANCE).
snake_case : Optional[int] = 1E-4
@slow
@require_tf
class _snake_case ( unittest.TestCase ):
    """Slow integration checks against the real ``allenai/led-base-16384`` weights.

    NOTE(review): both test methods were mangled to the same name, so the
    second definition overwrites the first at class-creation time, and
    ``__magic_name__``/``_a``/``model``/``output`` references stand in for
    distinct original identifiers (many are unresolved at runtime).
    """

    def SCREAMING_SNAKE_CASE ( self ):
        # hidden-state check on the bare encoder-decoder (.led) sub-model
        __magic_name__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        __magic_name__ : Optional[int] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Any = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : List[Any] = model(**_a )[0]
        # expected output shape
        __magic_name__ : List[str] = (1, 1_024, 768)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : int = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 )

    def SCREAMING_SNAKE_CASE ( self ):
        # logits check on the full conditional-generation model
        __magic_name__ : Tuple = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        __magic_name__ : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Optional[Any] = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : Union[str, Any] = model(**_a )[0]
        # expected logits shape
        __magic_name__ : Optional[int] = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : str = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
| 281 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A(ProcessorMixin):
    r"""
    Processor wrapping an OwlViT image processor and a CLIP tokenizer into a
    single callable producing open-vocabulary-detection model inputs.

    NOTE(review): reconstructed from mangled source — the ``ProcessorMixin``
    base and the class-attribute names (``attributes``,
    ``image_processor_class``, ``tokenizer_class``) were restored from this
    file's imports and the attribute values; duplicate parameter names
    (a SyntaxError) and assignments discarded into throwaway locals were
    repaired from how the values are used. Confirm against upstream
    ``processing_owlvit.py``.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # accept the deprecated `feature_extractor` kwarg as a fallback
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        Returns a BatchEncoding with ``input_ids``/``attention_mask`` when
        *text* is given, ``query_pixel_values`` when *query_images* is given
        and ``pixel_values`` when *images* is given.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                # a single query (or a flat list of queries for one image)
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # concatenate the per-sample encodings along the batch axis in
            # the requested tensor framework
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            # query images replace any text encoding
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to ``OwlViTImageProcessor.post_process``."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to ``OwlViTImageProcessor.post_process_object_detection``."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to ``OwlViTImageProcessor.post_process_image_guided_detection``."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 3 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Emit INFO-level logs while the conversion script runs.
logging.set_verbosity_info()
# module-level logger (mangled name; upstream calls this `logger`)
snake_case : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (timm key, HF key) rename pairs for a ViT-hybrid checkpoint.

    Covers the stem, the BiT backbone stages, the transformer encoder layers
    and either the pooler (``base_model=True``) or the classification head.

    NOTE(review): restored from mangled source — the original signature used
    duplicate parameter names (a SyntaxError), the function is called as
    ``create_rename_keys`` below, and the base-model remapping result was
    assigned to a throwaway local instead of ``rename_keys``.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )

    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )

    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate q/k/v entries.

    Mutates *state_dict* in place: pops ``blocks.{i}.attn.qkv.{weight,bias}``
    and writes the query/key/value slices under the HF ViT key layout.

    NOTE(review): reconstructed from mangled source — the original signature
    used duplicate parameter names (a SyntaxError) and the sliced tensors
    were assigned to throwaway locals; the target key names below follow the
    standard HF ViT conversion layout — confirm against the upstream script.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # q/k/v are stacked along dim 0 of the fused projection
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification-head weights from *state_dict* in place.

    Missing keys are tolerated (pop with a default), so the call is
    idempotent. NOTE(review): restored from mangled source — the original
    popped the dict itself as the key (a TypeError), and the call site below
    uses the name ``remove_classification_head_``.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under *old* to key *new* in *dct* (in place).

    NOTE(review): restored from mangled source — the original signature used
    three identical parameter names (a SyntaxError) and discarded the popped
    value; the call site below uses the name ``rename_key``.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats image used for the checks.

    NOTE(review): restored from mangled source — the call site below uses the
    name ``prepare_img`` and ``stream=_snake_case`` referenced an undefined
    name (upstream passes ``stream=True``).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm ViT-hybrid checkpoint to the HF ViTHybrid format.

    Loads the timm model, renames/splits its weights into the HF layout,
    verifies pixel values and logits agree, then optionally saves and/or
    pushes the converted model and image processor.

    NOTE(review): reconstructed from mangled source — the original signature
    used duplicate parameter names (a SyntaxError) and every intermediate
    value was assigned to a throwaway placeholder; variable names and boolean
    arguments were restored from how the values are used. Confirm against
    the upstream ``convert_vit_hybrid_timm_to_pytorch.py``.
    """
    # define default ViT hybrid configuration (BiT backbone + ViT encoder)
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # attach the ImageNet-1k label mapping to the config
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring the timm eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''')
        model.push_to_hub(F'''ybelkada/{vit_name}''')
        processor.push_to_hub(F'''ybelkada/{vit_name}''')
if __name__ == "__main__":
    # Command-line entry point for the timm -> HF ViT-hybrid conversion.
    # NOTE(review): the mangled source assigned the parser and parsed args to
    # `snake_case` while using them as `parser`/`args` (NameError); names fixed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# True on Python >= 3.10, where PEP 604 `X | Y` union syntax is available.
# NOTE(review): the mangled source bound this to `__snake_case` while the
# code below reads `is_python_no_less_than_3_10`; name fixed to match uses.
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    """``dataclasses.field`` helper for fields with a mutable list default.

    Wraps *default* in a ``default_factory`` so each dataclass instance gets
    a fresh copy instead of sharing one mutable object.

    NOTE(review): restored from mangled source — the original used two
    identical parameter names (a SyntaxError) and the call sites below use
    the name ``list_field``.
    """
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class UpperCAmelCase_ :
    # Example dataclass with four required fields of different types.
    # NOTE(review): all field names were mangled to one identifier, so only
    # the last annotation (bool) survives in __annotations__ — confirm the
    # original distinct names against upstream test_hf_argparser.py.
    lowerCamelCase : int
    lowerCamelCase : float
    lowerCamelCase : str
    lowerCamelCase : bool
@dataclass
class UpperCAmelCase_ :
    # Example dataclass with defaulted fields (an int and a str with help
    # metadata). NOTE(review): both fields share one mangled name.
    lowerCamelCase : int = 42
    lowerCamelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class UpperCAmelCase_ :
    # Example dataclass with default-False, default-True and optional bool
    # fields. NOTE(review): all three fields share one mangled name.
    lowerCamelCase : bool = False
    lowerCamelCase : bool = True
    lowerCamelCase : Optional[bool] = None
class UpperCAmelCase_ ( __lowercase ):
    # String-valued members "titi"/"toto". NOTE(review): the base
    # `__lowercase` is unresolved here — presumably Enum upstream — and both
    # members share one mangled name.
    lowerCamelCase : List[Any] = '''titi'''
    lowerCamelCase : List[str] = '''toto'''
class UpperCAmelCase_ ( __lowercase ):
    # Mixed-type members: two strings and an int (42). NOTE(review): the
    # base `__lowercase` is unresolved here — presumably Enum upstream — and
    # all members share one mangled name.
    lowerCamelCase : List[str] = '''titi'''
    lowerCamelCase : Any = '''toto'''
    lowerCamelCase : Union[str, Any] = 42
@dataclass
class UpperCAmelCase_ :
    # Dataclass coercing its field into BasicEnum after init.
    # NOTE(review): the method name is mangled (upstream: __post_init__) and
    # `self.foo` no longer matches the mangled field name.
    lowerCamelCase : BasicEnum = "toto"

    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        lowerCAmelCase = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
    # Dataclass coercing its field into MixedTypeEnum after init.
    # NOTE(review): the method name is mangled (upstream: __post_init__) and
    # `self.foo` no longer matches the mangled field name.
    lowerCamelCase : MixedTypeEnum = "toto"

    def __UpperCAmelCase ( self : int ) -> Dict:
        lowerCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
    # Example with Optional[...] fields of several types, incl. list fields
    # built via list_field. NOTE(review): all fields share one mangled name
    # and `default=__lowercase` references an unresolved placeholder.
    lowerCamelCase : Optional[int] = None
    lowerCamelCase : Optional[float] = field(default=__lowercase , metadata={'''help''': '''help message'''} )
    lowerCamelCase : Optional[str] = None
    lowerCamelCase : Optional[List[str]] = list_field(default=[] )
    lowerCamelCase : Optional[List[int]] = list_field(default=[] )
@dataclass
class UpperCAmelCase_ :
    # Example with list-typed fields and non-trivial list defaults.
    # NOTE(review): all fields share one mangled name.
    lowerCamelCase : List[int] = list_field(default=[] )
    lowerCamelCase : List[int] = list_field(default=[1, 2, 3] )
    lowerCamelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
    lowerCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase_ :
    # Example with required (no-default) fields plus an enum coercion hook.
    # NOTE(review): field and method names are mangled; `self.required_enum`
    # no longer matches any surviving field name.
    lowerCamelCase : List[int] = field()
    lowerCamelCase : str = field()
    lowerCamelCase : BasicEnum = field()

    def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        lowerCAmelCase = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase_ :
    # Example using string (forward-reference) annotations for every field
    # type. NOTE(review): all fields share one mangled name.
    lowerCamelCase : int
    lowerCamelCase : "BasicEnum" = field()
    lowerCamelCase : "Optional[bool]" = None
    lowerCamelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} )
    lowerCamelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
    # PEP 604 `X | Y` variants of the bool/optional examples above; only
    # definable on Python >= 3.10. NOTE(review): field names are mangled to
    # one identifier per class and `default=__lowercase` is unresolved.
    @dataclass
    class UpperCAmelCase_ :
        lowerCamelCase : bool = False
        lowerCamelCase : bool = True
        lowerCamelCase : bool | None = None

    @dataclass
    class UpperCAmelCase_ :
        lowerCamelCase : int | None = None
        lowerCamelCase : float | None = field(default=__lowercase , metadata={'''help''': '''help message'''} )
        lowerCamelCase : str | None = None
        lowerCamelCase : list[str] | None = list_field(default=[] )
        lowerCamelCase : list[int] | None = list_field(default=[] )
class UpperCAmelCase_ ( unittest.TestCase ):
def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
    """Assert that two argparse parsers define equivalent actions.

    NOTE(review): restored from mangled source — the original signature used
    duplicate parameter names (a SyntaxError); `a`/`b`/`x`/`y` were recovered
    from the references inside the body and the method name from the
    ``self.argparsersEqual(...)`` call sites in this class.
    """
    self.assertEqual(len(a._actions), len(b._actions))
    for x, y in zip(a._actions, b._actions):
        xx = {k: v for k, v in vars(x).items() if k != 'container'}
        yy = {k: v for k, v in vars(y).items() if k != 'container'}

        # Choices with mixed type have custom function as "type"
        # So we need to compare results directly for equality
        if xx.get('choices', None) and yy.get('choices', None):
            for expected_choice in yy["choices"] + xx["choices"]:
                self.assertEqual(xx['type'](expected_choice), yy['type'](expected_choice))
            del xx["type"], yy["type"]

        self.assertEqual(xx, yy)
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
    # Basic smoke test: an HfArgumentParser built from a dataclass should
    # match a hand-built argparse parser, and parsing should populate the
    # dataclass. NOTE(review): `UpperCAmelCase__` stands in for several
    # distinct originals (dataclass type, int/float/str types, bool flags),
    # so many references below are unresolved as written.
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
    expected.add_argument('--bar' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
    expected.add_argument('--baz' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
    expected.add_argument('--flag' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    lowerCAmelCase = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
    ((lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ )
    self.assertFalse(example.flag )
def __UpperCAmelCase ( self : int ) -> int:
    # Defaulted dataclass fields should become optional arguments with the
    # matching defaults and help text. NOTE(review): `UpperCAmelCase__`
    # placeholders stand in for distinct originals (dataclass/int/str types).
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--foo' , default=4_2 , type=UpperCAmelCase__ )
    expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> str:
    # Bool fields: `--flag`-style arguments plus the auto-generated `--no_*`
    # negative counterpart for default-True fields, parsed against both the
    # plain and the PEP 604 dataclass variants. NOTE(review):
    # `UpperCAmelCase__` placeholders stand in for several distinct originals
    # (string_to_bool, True/False/None, dataclass types, parser objects).
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--foo' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' )
    expected.add_argument('--baz' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' )
    # A boolean no_* argument always has to come after its "default: True" regular counter-part
    # and its default must be set to False
    expected.add_argument('--no_baz' , action='store_false' , default=UpperCAmelCase__ , dest='baz' )
    expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
    lowerCAmelCase = [WithDefaultBoolExample]
    if is_python_no_less_than_3_10:
        dataclass_types.append(UpperCAmelCase__ )
    for dataclass_type in dataclass_types:
        lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = parser.parse_args([] )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
        lowerCAmelCase = parser.parse_args(['--foo', '--no_baz'] )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
        lowerCAmelCase = parser.parse_args(['--foo', '--baz'] )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
        lowerCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
        lowerCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
    """Enum-typed field: becomes a fixed-choices argument, and parsing into the
    dataclass converts the raw value back to the enum member."""
    # NOTE(review): `UpperCAmelCase__`/`lowerCAmelCase` are obfuscation
    # placeholders for distinct original names — verify against the source.
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument(
        '--foo' , default='toto' , choices=['titi', 'toto', 4_2] , type=make_choice_type_function(['titi', 'toto', 4_2] ) , )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    lowerCAmelCase = parser.parse_args([] )
    self.assertEqual(args.foo , 'toto' )
    lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0]
    self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
    lowerCAmelCase = parser.parse_args(['--foo', 'titi'] )
    self.assertEqual(args.foo , 'titi' )
    lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
    self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
    lowerCAmelCase = parser.parse_args(['--foo', '42'] )
    self.assertEqual(args.foo , 4_2 )
    lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
    self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCAmelCase ( self : int ) -> Dict:
    """Literal-typed field behaves like the enum case: fixed choices with a
    converter that restores the original (str/int) value type."""
    @dataclass
    class UpperCAmelCase_ :
        # Choices mix strings and an int; the default is 'toto'.
        lowerCamelCase : Literal["titi", "toto", 42] = "toto"

    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument(
        '--foo' , default='toto' , choices=('titi', 'toto', 4_2) , type=make_choice_type_function(['titi', 'toto', 4_2] ) , )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    lowerCAmelCase = parser.parse_args([] )
    self.assertEqual(args.foo , 'toto' )
    lowerCAmelCase = parser.parse_args(['--foo', 'titi'] )
    self.assertEqual(args.foo , 'titi' )
    lowerCAmelCase = parser.parse_args(['--foo', '42'] )
    self.assertEqual(args.foo , 4_2 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
    """List-typed fields map to `nargs='+'` arguments with their declared
    defaults; space-separated CLI values split into typed element lists."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--foo_int' , nargs='+' , default=[] , type=UpperCAmelCase__ )
    expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=UpperCAmelCase__ )
    expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ )
    expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    lowerCAmelCase = parser.parse_args([] )
    self.assertEqual(
        UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
    lowerCAmelCase = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
    self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
    """Optional-typed fields default to None (or an empty list) and are filled
    from the CLI using the underlying element type."""
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--foo' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
    expected.add_argument('--bar' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='help message' )
    expected.add_argument('--baz' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
    expected.add_argument('--ces' , nargs='+' , default=[] , type=UpperCAmelCase__ )
    expected.add_argument('--des' , nargs='+' , default=[] , type=UpperCAmelCase__ )
    lowerCAmelCase = [OptionalExample]
    if is_python_no_less_than_3_10:
        # Python 3.10+ also checks the `X | None` spelling of Optional.
        dataclass_types.append(UpperCAmelCase__ )
    for dataclass_type in dataclass_types:
        lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = parser.parse_args([] )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) )
        lowerCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo=1_2 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def __UpperCAmelCase ( self : Any ) -> List[str]:
    """Fields without defaults become `required=True` arguments."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--required_list' , nargs='+' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
    expected.add_argument('--required_str' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
    expected.add_argument(
        '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
    """String (PEP 563-style) annotations resolve to the same argparse
    arguments as their evaluated counterparts."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = argparse.ArgumentParser()
    expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
    expected.add_argument(
        '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , )
    expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
    expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' )
    expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ )
    self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
    """`parse_dict` builds the dataclass directly from a plain dict and the
    result equals a directly-constructed instance."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = {
        'foo': 1_2,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
    }
    lowerCAmelCase = parser.parse_dict(UpperCAmelCase__ )[0]
    lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
    self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
    """`parse_dict` raises when the dict has unknown keys and extra keys are
    not allowed."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = {
        'foo': 1_2,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
        'extra': 4_2,  # not a field of the dataclass — must trigger the error
    }
    self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> List[str]:
    """Round-trip the args dict through a JSON file on disk.

    Writes the dict to `<tmp>/temp_json.json` and checks the dataclass parsed
    from that file equals one built directly from the same dict.
    """
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = {
        'foo': 1_2,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_json' )
        os.mkdir(UpperCAmelCase__ )
        with open(temp_local_path + '.json' , 'w+' ) as f:
            json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
        # Bug fix: this is the JSON round-trip test, so it must call
        # parse_json_file (the YAML variant is exercised by the sibling test).
        lowerCAmelCase = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
        lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
    """Round-trip the args dict through a YAML file on disk via
    `parse_yaml_file`."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    lowerCAmelCase = {
        'foo': 1_2,
        'bar': 3.14,
        'baz': '42',
        'flag': True,
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_yaml' )
        os.mkdir(UpperCAmelCase__ )
        with open(temp_local_path + '.yaml' , 'w+' ) as f:
            yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ )
        lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
        lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> int:
    """Smoke test: an HfArgumentParser can be constructed at all."""
    lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
    self.assertIsNotNone(UpperCAmelCase__ )
| 4 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

# NOTE(review): obfuscation rebinds every result to `snake_case` while later
# statements read the intended names (`mname`, `config`, `tiny_model`, `batch`,
# `outputs`) — restore the original variable names before running this script.
snake_case : List[str] = "facebook/wmt19-en-de"
snake_case : Dict = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : List[str] = FSMTConfig.from_pretrained(mname)
# Shrink every architectural dimension to the minimum so the checkpoint is tiny.
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
snake_case : int = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test: one forward pass to verify the reduced model still runs.
snake_case : Optional[Any] = tokenizer(["Making tiny model"], return_tensors="pt")
snake_case : List[str] = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
snake_case : Dict = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 281 | 0 |
def UpperCAmelCase_ ( number , digit_amount ) -> float:
    """Return the decimal (fractional) part of ``number``.

    If ``digit_amount`` is positive, the fractional part is rounded to that
    many digits; otherwise it is returned unrounded.

    Bug fix: the obfuscated signature declared the same parameter name twice
    (a SyntaxError) while the body read the undefined names ``number`` and
    ``digit_amount`` — the parameters are restored to those names.
    """
    if digit_amount > 0:
        return round(number - int(number) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # Bug fix: the function above is named `UpperCAmelCase_`, but these demo
    # calls referenced an undefined `decimal_isolate`, raising NameError.
    print(UpperCAmelCase_(1.53, 0))
    print(UpperCAmelCase_(35.3_45, 1))
    print(UpperCAmelCase_(35.3_45, 2))
    print(UpperCAmelCase_(35.3_45, 3))
    print(UpperCAmelCase_(-14.7_89, 3))
    print(UpperCAmelCase_(0, 2))
    print(UpperCAmelCase_(-14.1_23, 1))
    print(UpperCAmelCase_(-14.1_23, 2))
    print(UpperCAmelCase_(-14.1_23, 3))
| 5 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase_ ( out , labels ) -> int:
    """Count correct predictions: rows of ``out`` whose argmax matches ``labels``.

    Bug fix: the obfuscated signature declared `_snake_case` twice (a
    SyntaxError) while the body read the undefined names — parameters are
    restored so `out` is the (n, num_classes) score array and `labels` the
    (n,) integer label array.
    """
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def lowerCAmelCase_ ( _snake_case ) -> list:
    """Load a ROCStories CSV from the path ``_snake_case``.

    Returns a list of tuples ``(story_text, continuation1, continuation2,
    correct_label)`` where the label is shifted to be 0-based.

    Bug fix: the obfuscated body passed the path (instead of the open file)
    to ``csv.reader``/``next`` — the reader is now a distinct local.
    """
    with open(_snake_case , encoding="utf_8" ) as f:
        reader = csv.reader(f )
        output = []
        next(reader )  # skip the header line
        for line in tqdm(reader ):
            output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[int] ) -> int:
    """Convert encoded ROCStories examples into per-split tensors
    (input_ids, mc_token_ids, lm_labels, mc_labels) for the double-heads model.

    NOTE(review): obfuscation collapsed all six parameters to `_snake_case`
    (a duplicate-argument SyntaxError) and every local to `__magic_name__`,
    while later statements read the intended names (`n_batch`, `input_len`,
    `with_conta`, `tensor_datasets`, ...). Restore the original names from
    `run_openai_gpt.py` before use.
    """
    __magic_name__ : Optional[int] = []
    for dataset in encoded_datasets:
        __magic_name__ : Union[str, Any] = len(_snake_case )
        # One row per example, two candidate continuations per row.
        __magic_name__ : Dict = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
        __magic_name__ : List[str] = np.zeros((n_batch, 2) , dtype=np.intaa )
        # -100 marks positions ignored by the LM loss.
        __magic_name__ : Optional[int] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
        __magic_name__ : int = np.zeros((n_batch,) , dtype=np.intaa )
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(_snake_case ):
            # Build "<start> story <delim> continuation <clf>" for each candidate.
            __magic_name__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __magic_name__ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __magic_name__ : str = with_conta
            __magic_name__ : Tuple = with_conta
            __magic_name__ : Union[str, Any] = len(_snake_case ) - 1
            __magic_name__ : int = len(_snake_case ) - 1
            __magic_name__ : Optional[Any] = with_conta
            __magic_name__ : Optional[Any] = with_conta
            __magic_name__ : Optional[int] = mc_label
        __magic_name__ : str = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(_snake_case ) for t in all_inputs ) )
    return tensor_datasets
def lowerCAmelCase_ ( ) -> List[Any]:
    """Fine-tune the OpenAI GPT double-heads model on the ROCStories cloze task.

    Parses CLI arguments, optionally trains on the training split, saves the
    model/config/vocabulary to ``--output_dir``, and optionally evaluates
    loss/accuracy on the eval split, writing ``eval_results.txt``.

    NOTE(review): obfuscation collapsed every local binding to
    ``__magic_name__`` and many argument values to ``_snake_case``, while
    later statements read the intended names (``parser``, ``args``,
    ``model``, ``tokenizer``, ...). Restore the original names from
    ``run_openai_gpt.py`` before executing.
    """
    # ---- CLI definition ----
    __magic_name__ : Any = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=_snake_case , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=_snake_case , default="" )
    parser.add_argument("--eval_dataset" , type=_snake_case , default="" )
    parser.add_argument("--seed" , type=_snake_case , default=42 )
    parser.add_argument("--num_train_epochs" , type=_snake_case , default=3 )
    parser.add_argument("--train_batch_size" , type=_snake_case , default=8 )
    parser.add_argument("--eval_batch_size" , type=_snake_case , default=16 )
    parser.add_argument("--adam_epsilon" , default=1E-8 , type=_snake_case , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=_snake_case , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=_snake_case , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=_snake_case , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=_snake_case , default=6.25E-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=_snake_case , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=_snake_case , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=_snake_case , default=0.01 )
    parser.add_argument("--lm_coef" , type=_snake_case , default=0.9 )
    parser.add_argument("--n_valid" , type=_snake_case , default=374 )
    parser.add_argument("--server_ip" , type=_snake_case , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=_snake_case , default="" , help="Can be used for distant debugging." )
    __magic_name__ : List[Any] = parser.parse_args()
    print(_snake_case )
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
        ptvsd.wait_for_attach()
    # ---- Reproducibility ----
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    __magic_name__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    __magic_name__ : Optional[int] = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(_snake_case , _snake_case ) )
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True." )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    __magic_name__ : List[Any] = ["_start_", "_delimiter_", "_classify_"]
    __magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(_snake_case )
    __magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(_snake_case )
    __magic_name__ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(_snake_case ) )
    model.to(_snake_case )
    # Load and encode the datasets
    def tokenize_and_encode(_snake_case : str ):
        # Recursively tokenize strings, pass ints through, map over containers.
        if isinstance(_snake_case , _snake_case ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_snake_case ) )
        elif isinstance(_snake_case , _snake_case ):
            return obj
        return [tokenize_and_encode(_snake_case ) for o in obj]
    logger.info("Encoding dataset..." )
    __magic_name__ : Optional[int] = load_rocstories_dataset(args.train_dataset )
    __magic_name__ : str = load_rocstories_dataset(args.eval_dataset )
    __magic_name__ : int = (train_dataset, eval_dataset)
    __magic_name__ : List[str] = tokenize_and_encode(_snake_case )
    # Compute the max input length for the Transformer
    __magic_name__ : Optional[Any] = model.config.n_positions // 2 - 2
    __magic_name__ : Optional[int] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    __magic_name__ : List[str] = min(_snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    __magic_name__ : List[Any] = pre_process_datasets(_snake_case , _snake_case , _snake_case , *_snake_case )
    __magic_name__ , __magic_name__ : Optional[int] = tensor_datasets[0], tensor_datasets[1]
    __magic_name__ : Tuple = TensorDataset(*_snake_case )
    __magic_name__ : Union[str, Any] = RandomSampler(_snake_case )
    __magic_name__ : Dict = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.train_batch_size )
    __magic_name__ : Any = TensorDataset(*_snake_case )
    __magic_name__ : Optional[Any] = SequentialSampler(_snake_case )
    __magic_name__ : int = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            __magic_name__ : Tuple = args.max_steps
            __magic_name__ : List[str] = args.max_steps // (len(_snake_case ) // args.gradient_accumulation_steps) + 1
        else:
            __magic_name__ : List[str] = len(_snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
        __magic_name__ : str = list(model.named_parameters() )
        # bias / LayerNorm parameters are excluded from weight decay.
        __magic_name__ : Dict = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        __magic_name__ : str = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        __magic_name__ : str = AdamW(_snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
        __magic_name__ : List[str] = get_linear_schedule_with_warmup(
            _snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=_snake_case )
    if args.do_train:
        __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            __magic_name__ : List[str] = 0
            __magic_name__ : Tuple = 0
            __magic_name__ : Dict = tqdm(_snake_case , desc="Training" )
            for step, batch in enumerate(_snake_case ):
                __magic_name__ : Optional[Any] = tuple(t.to(_snake_case ) for t in batch )
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = batch
                __magic_name__ : Optional[Any] = model(_snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
                # Total loss = weighted LM loss + multiple-choice loss.
                __magic_name__ : Optional[Any] = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                __magic_name__ : List[str] = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                __magic_name__ : int = "Training loss: {:.2e} lr: {:.2e}".format(_snake_case , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        __magic_name__ : Dict = model.module if hasattr(_snake_case , "module" ) else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        __magic_name__ : List[Any] = os.path.join(args.output_dir , _snake_case )
        __magic_name__ : Dict = os.path.join(args.output_dir , _snake_case )
        torch.save(model_to_save.state_dict() , _snake_case )
        model_to_save.config.to_json_file(_snake_case )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        __magic_name__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        __magic_name__ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(_snake_case )
    if args.do_eval:
        model.eval()
        __magic_name__ , __magic_name__ : Any = 0, 0
        __magic_name__ , __magic_name__ : Union[str, Any] = 0, 0
        for batch in tqdm(_snake_case , desc="Evaluating" ):
            __magic_name__ : int = tuple(t.to(_snake_case ) for t in batch )
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = batch
            with torch.no_grad():
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = model(
                    _snake_case , mc_token_ids=_snake_case , lm_labels=_snake_case , mc_labels=_snake_case )
            __magic_name__ : Tuple = mc_logits.detach().cpu().numpy()
            __magic_name__ : Any = mc_labels.to("cpu" ).numpy()
            __magic_name__ : str = accuracy(_snake_case , _snake_case )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        __magic_name__ : Tuple = eval_loss / nb_eval_steps
        __magic_name__ : List[Any] = eval_accuracy / nb_eval_examples
        __magic_name__ : int = tr_loss / nb_tr_steps if args.do_train else None
        __magic_name__ : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        __magic_name__ : int = os.path.join(args.output_dir , "eval_results.txt" )
        with open(_snake_case , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info("  %s = %s" , _snake_case , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
    # Bug fix: the training entry point above was renamed to `lowerCAmelCase_`
    # by the obfuscation; calling the old name `main` raised NameError.
    lowerCAmelCase_()
| 281 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __A:
    # Synthetic 1-D regression dataset: y = a*x + b + Gaussian noise.
    # NOTE(review): obfuscation collapsed all __init__ parameters to
    # `_snake_case` (a duplicate-argument SyntaxError) and every local to
    # `__a`, while the bodies read the intended names (`rng`, `length`, `a`,
    # `b`, `i`) — restore the original parameter/attribute names before use.
    def __init__( self , _snake_case=2 , _snake_case=3 , _snake_case=64 , _snake_case=None ) -> Union[str, Any]:
        """Create `length` samples with x ~ N(0, 1) and y = a*x + b + N(0, 0.1)."""
        __a = np.random.default_rng(_snake_case )
        __a = length
        __a = rng.normal(size=(length,) ).astype(np.floataa )
        __a = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
    def __len__( self ) -> Tuple:
        """Number of samples in the dataset."""
        return self.length
    def __getitem__( self , _snake_case ) -> Union[str, Any]:
        """Return the i-th sample as a dict with keys 'x' and 'y'."""
        return {"x": self.x[i], "y": self.y[i]}
class __A( torch.nn.Module ):
    # Toy regression module computing x * a[0] + b[0] with fixed learnable
    # parameters initialised to [2., 3.]; logs dtypes on the first forward.
    # NOTE(review): obfuscation collapsed the locals/attributes to `__a` and
    # the forward input to `_snake_case` while the body reads `x` and
    # `self.a`/`self.b`/`self.first_batch` — restore names before use.
    def __init__( self , _snake_case=0 , _snake_case=0 , _snake_case=False ) -> Any:
        """Register the two parameter tensors and the first-batch flag."""
        super().__init__()
        __a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        __a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        __a = True
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None ) -> Tuple:
        """Forward pass; prints parameter/input dtypes once for debugging."""
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            __a = False
        return x * self.a[0] + self.b[0]
class __A( torch.nn.Module ):
    # Toy regression module computing x * a + b with scalar learnable
    # parameters built from the constructor arguments; logs dtypes on the
    # first forward call.
    # NOTE(review): obfuscation collapsed all constructor parameters to
    # `_snake_case` (duplicate-argument SyntaxError) and locals to `__a`,
    # while the body reads `x`/`self.a`/`self.b`/`self.first_batch` —
    # restore the original names before use.
    def __init__( self , _snake_case=0 , _snake_case=0 , _snake_case=False ) -> Dict:
        """Register the parameters a and b and the first-batch flag."""
        super().__init__()
        __a = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
        __a = torch.nn.Parameter(torch.tensor(_snake_case ).float() )
        __a = True
    def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None ) -> List[Any]:
        """Forward pass; prints parameter/input dtypes once for debugging."""
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            __a = False
        return x * self.a + self.b
def __lowerCAmelCase ( a__ , a__ = 16 ) -> List[str]:
    """Build MRPC train/eval DataLoaders tokenized with `bert-base-cased`.

    NOTE(review): obfuscation collapsed both parameters to `a__` (a
    duplicate-argument SyntaxError) and every local to `__a`, while later
    statements read the intended names (`tokenizer`, `datasets`,
    `label_to_id`, `tokenized_datasets`, `accelerator`, ...). Restore the
    original parameter/local names before use.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer
    __a = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    __a = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    __a = load_dataset('''csv''' , data_files=a__ )
    __a = datasets['''train'''].unique('''label''' )
    __a = {v: i for i, v in enumerate(a__ )}
    def tokenize_function(a__ ):
        # max_length=None => use the model max length (it's actually the default)
        __a = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=a__ , max_length=a__ , padding='''max_length''' )
        if "label" in examples:
            __a = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    __a = datasets.map(
        a__ , batched=a__ , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(a__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(a__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(a__ , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    __a = DataLoader(tokenized_datasets['''train'''] , shuffle=a__ , collate_fn=a__ , batch_size=2 )
    __a = DataLoader(tokenized_datasets['''validation'''] , shuffle=a__ , collate_fn=a__ , batch_size=1 )
    return train_dataloader, eval_dataloader
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class A ( ctypes.Structure ):
    """ctypes mirror of the Win32 CONSOLE_CURSOR_INFO structure."""
    # Bug fix: ctypes requires the field list to be named `_fields_`;
    # the obfuscated name `lowerCamelCase` left the structure fieldless.
    _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def _snake_case( ) -> Optional[int]:
'''simple docstring'''
if os.name == "nt":
A__ = CursorInfo()
A__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
A__ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def _snake_case( ) -> Optional[int]:
'''simple docstring'''
if os.name == "nt":
A__ = CursorInfo()
A__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
A__ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def _snake_case( ) -> Union[str, Any]:
    """Context manager that hides the cursor for the duration of the block
    and guarantees it is shown again afterwards, even on exceptions.

    NOTE(review): obfuscation renamed both helper functions above to
    `_snake_case`, so the `hide_cursor`/`show_cursor` names referenced here
    are unbound — restore the original helper names before use.
    """
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 7 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase_ ( _snake_case : List[Any] ) -> List[Any]:
    """While the mock filesystem fixture is active, both the mock protocol and
    the datasets bz2 compression filesystem are registered in fsspec."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Tuple:
    """After the fixture is torn down, the mock protocol is gone but the bz2
    compression filesystem registered by datasets remains."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ) -> Union[str, Any]:
    """`extract_path_from_uri` strips the s3:// scheme and leaves local paths
    untouched.

    NOTE(review): locals were collapsed to `__magic_name__`/`_snake_case` by
    obfuscation; the assertion names reveal the intended bindings.
    """
    __magic_name__ : Dict = "mock-s3-bucket"
    __magic_name__ : Any = F'''s3://{mock_bucket}'''
    __magic_name__ : str = extract_path_from_uri(_snake_case )
    assert dataset_path.startswith("s3://" ) is False
    __magic_name__ : Tuple = "./local/path"
    __magic_name__ : Optional[Any] = extract_path_from_uri(_snake_case )
    assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( _snake_case : List[str] ) -> Optional[Any]:
    """`is_remote_filesystem` is True for the remote fixture filesystem and
    False for the local `file` filesystem."""
    __magic_name__ : str = is_remote_filesystem(_snake_case )
    assert is_remote is True
    __magic_name__ : Optional[int] = fsspec.filesystem("file" )
    __magic_name__ : int = is_remote_filesystem(_snake_case )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _snake_case )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Any ) -> int:
    """For each compression filesystem, opening the compressed fixture file
    exposes exactly one member whose decompressed content matches the
    uncompressed fixture.

    NOTE(review): obfuscation collapsed all parameters to `_snake_case`
    (a duplicate-argument SyntaxError) and locals to `__magic_name__`;
    restore the original fixture parameter names before use.
    """
    __magic_name__ : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    __magic_name__ : str = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional backends (lz4/zstd) may be missing — skip with their reason.
        __magic_name__ : Dict = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(_snake_case )
    __magic_name__ : str = fsspec.filesystem(compression_fs_class.protocol , fo=_snake_case )
    assert isinstance(_snake_case , _snake_case )
    __magic_name__ : int = os.path.basename(_snake_case )
    __magic_name__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(_snake_case , "r" , encoding="utf-8" ) as f, open(_snake_case , encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) -> str:
    """A chained URL like `zip://member::archive` resolves to a filesystem on
    which the member file exists (and a non-existent member does not)."""
    __magic_name__ : int = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    __magic_name__ : int = compressed_file_paths[protocol]
    __magic_name__ : Tuple = "dataset.jsonl"
    __magic_name__ : List[str] = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
    __magic_name__ , *__magic_name__ : Optional[Any] = fsspec.get_fs_token_paths(_snake_case )
    assert fs.isfile(_snake_case )
    assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Tuple ) -> str:
    """HfFileSystem lists/inspects the Hub repo fixture and the contents of a
    file read through it match the local fixture file."""
    __magic_name__ : int = hf_api.dataset_info(_snake_case , token=_snake_case )
    __magic_name__ : Optional[Any] = HfFileSystem(repo_info=_snake_case , token=_snake_case )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(_snake_case ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def test_fs_overwrites():
    """Re-registering an existing fsspec protocol warns on module reload.

    NOTE(review): restored from obfuscated code; `register_implementation(protocol, None,
    clobber=True)` mirrors the datasets test suite — confirm the second argument against
    the file's import of `register_implementation`.
    """
    protocol = "bz2"
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 281 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __SCREAMING_SNAKE_CASE(*objects):
    """Release references to `objects`, run GC and empty the accelerator cache.

    Returns the same number of entries, all None, so callers can rebind:
    ``a, b = release_memory(a, b)``. The obfuscated original rebound a single
    temp name in the loop instead of clearing list slots and returned an
    unbound name; restored.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    # Drop each reference so the underlying tensors become collectable.
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception):
    """Return True when `exception` is a single-message out-of-memory RuntimeError.

    Recognizes CUDA OOM, the cuDNN "not supported" failure mode, and CPU
    allocator OOM. Named `should_reduce_batch_size` because the batch-size
    decorator below already calls it by that name (the obfuscated def name
    was shadowed and unreachable).
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def __SCREAMING_SNAKE_CASE(function=None, starting_batch_size=128):
    """Decorator that retries `function` with a halved batch size on OOM.

    `function` must accept the batch size as its first positional argument;
    the decorator supplies it, starting at `starting_batch_size` and halving
    after every OOM until the call succeeds or the size reaches zero.
    The obfuscated original had duplicate parameter names (a SyntaxError) and
    never bound `batch_size`/`params`/`arg_str`; restored.
    """
    if function is None:
        # Support bare `@decorator(starting_batch_size=N)` usage.
        return functools.partial(__SCREAMING_SNAKE_CASE, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the batch size must not be passed explicitly.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL. The obfuscated original rebound a
# single name for both the logger and this map, clobbering the logger; restored.
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """Configuration class storing the architecture hyper-parameters of a ConvBERT model.

    The obfuscated original used duplicate `_a` parameter names (a SyntaxError)
    and inherited from a rebound module variable instead of PretrainedConfig;
    parameter and attribute names restored to the standard ConvBERT config.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT: declares the dynamic axes of the inputs."""

    @property
    def inputs(self):
        # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 281 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase(unittest.TestCase):
    """Unit tests for `DisjunctiveConstraint`.

    The obfuscated original named every method identically (so only the last
    survived and unittest collected none) and passed unbound names to
    `assertRaises`; restored to distinct `test_*` methods raising ValueError.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the demo 'merlion' image and return it as an RGB PIL image.

    Name restored from the call site in the conversion routine.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build (old_key, new_key) pairs mapping original LAVIS BLIP-2 weight names to HF names.

    `config` must expose `config.vision_config.num_hidden_layers`. Name restored
    from the call site in the conversion routine.
    """
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in `dct`, in place.

    The obfuscated original popped the value but never stored it back (and had
    duplicate parameter names, a SyntaxError); restored.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Recombine each vision block's separate q/v attention biases into a single qkv bias.

    The original model has no k bias, so zeros of the same shape are inserted
    between q and v. Mutates `state_dict` in place; name restored from the
    call site in the conversion routine.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Assemble the BLIP-2 config (vision + language) for `model_name`.

    Returns ``(config, image_size)``. COCO checkpoints use 364px inputs, all
    others 224px. Signature restored from the call site
    (`get_blipa_config(model_name, eos_token_id=...)`).
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    # NOTE(review): an unknown model_name leaves text_config unbound and raises
    # NameError below — callers restrict model_name to the known choices.

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original LAVIS BLIP-2 checkpoint to the HF format.

    Loads the original model, renames/repacks its weights into a
    BlipaForConditionalGeneration, verifies logits and pixel values match,
    runs a demo generation, and optionally saves to
    `pytorch_dump_folder_path` / pushes to the Hub. Local names restored from
    the obfuscated original (which never bound most of the names it read).
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()

    # Mapping from HF checkpoint name to the LAVIS (name, model_type) pair.
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    lavis_name, lavis_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_name, model_type=lavis_type, is_eval=True, device=device)
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion. The obfuscated
    # original bound parser/choices/args to one rebound name; restored.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ProphetNetTokenizer (WordPiece-based).

    Restored from obfuscated code: the mixin base name was undefined, the
    `tokenizer_class`/`test_rust_tokenizer` attributes (read by the mixin and
    by `setUp`) were bound to a placeholder name, and every method shared one
    name so unittest collected nothing.
    """

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Lower-casing strips accents by default.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prophetnet_tokenizer(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 10 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Constant names restored: the class body below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but the
# obfuscated original rebound a single name for all of them (clobbering each in turn).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum input lengths (in tokens) of the pretrained checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Name restored from the call site inside `bpe`. An empty `word` yields an
    empty set instead of raising IndexError.
    """
    if not word:
        return set()
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return set(pairs)
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
    self,
    vocab_file,
    merges_file,
    bos_token="<s>",
    eos_token="</s>",
    sep_token="</s>",
    cls_token="<s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
    **kwargs,
):
    """Build the BPE tokenizer from a vocab file and a merges ("bpe.codes") file.

    The obfuscated original had duplicate `_a` parameters (a SyntaxError) and
    never assigned `self.encoder`/`self.decoder`/`self.bpe_ranks`; restored.
    """
    super().__init__(
        bos_token=bos_token,
        eos_token=eos_token,
        unk_token=unk_token,
        sep_token=sep_token,
        cls_token=cls_token,
        pad_token=pad_token,
        mask_token=mask_token,
        **kwargs,
    )
    self.vocab_file = vocab_file
    self.merges_file = merges_file
    # Special tokens occupy the first ids; the remaining vocab is read from file.
    self.encoder = {}
    self.encoder[self.bos_token] = 0
    self.encoder[self.pad_token] = 1
    self.encoder[self.eos_token] = 2
    self.encoder[self.unk_token] = 3
    self.add_from_file(vocab_file)
    self.decoder = {v: k for k, v in self.encoder.items()}
    with open(merges_file, encoding="utf-8") as merges_handle:
        merges = merges_handle.read().split("\n")[:-1]
    # Each merge line is "left right freq"; drop the trailing frequency column.
    merges = [tuple(merge.split()[:-1]) for merge in merges]
    self.bpe_ranks = dict(zip(merges, range(len(merges))))
    self.cache = {}
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Add special tokens around one or two sequences.

    Single sequence: ``<s> X </s>``; pair: ``<s> A </s></s> B </s>``.
    Restored to the PreTrainedTokenizer API name (the obfuscated def had
    duplicate parameter names and an unbound `token_ids_a`).
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask with 1 at special-token positions and 0 at sequence tokens.

    Mirrors the layout produced by `build_inputs_with_special_tokens`; restored
    to the PreTrainedTokenizer API name.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[Any] = [self.sep_token_id]
__magic_name__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _a ):
if token in self.cache:
return self.cache[token]
__magic_name__ : List[Any] = tuple(_a )
__magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__magic_name__ : Any = get_pairs(_a )
if not pairs:
return token
while True:
__magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ , __magic_name__ : List[str] = bigram
__magic_name__ : List[str] = []
__magic_name__ : List[str] = 0
while i < len(_a ):
try:
__magic_name__ : Any = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : Union[str, Any] = tuple(_a )
__magic_name__ : Optional[int] = new_word
if len(_a ) == 1:
break
else:
__magic_name__ : List[Any] = get_pairs(_a )
__magic_name__ : Optional[int] = "@@ ".join(_a )
__magic_name__ : Tuple = word[:-4]
__magic_name__ : str = word
return word
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = []
__magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.decoder.get(_a , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Optional[int] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__magic_name__ : Union[str, Any] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
copyfile(self.merges_file , _a )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE ( self , _a ):
if isinstance(_a , _a ):
try:
with open(_a , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
__magic_name__ : List[Any] = f.readlines()
for lineTmp in lines:
__magic_name__ : Optional[Any] = lineTmp.strip()
__magic_name__ : Union[str, Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
__magic_name__ : Optional[int] = line[:idx]
__magic_name__ : Dict = len(self.encoder )
| 281 | 0 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A dense matrix over Python numbers, stored as a list of row lists.

    Supports (row, col) tuple indexing, +, unary -, -, * (scalar and
    matrix), transpose, and the Sherman-Morrison rank-1 inverse update.
    Restored from obfuscated code in which three methods shared one name
    and `__getitem__` called an undefined `validate_indicies`.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Width of the widest element, used to right-align every column.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """True iff `loc` is an in-range (row, col) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Inverse of (A + u v^T), given that `self` already holds A^(-1).

        Returns None when the update is singular (denominator is zero).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on the 3x3 identity."""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 11 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape the first Amazon.in search-results page for `product`.

    Returns a DataFrame with title, link, current price, rating, MRP and
    discount percentage per result.  NOTE(review): every local in the
    original was obfuscated away; names reconstructed from the guard below
    and the column handling — confirm against the upstream script.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Normalise rows whose price / MRP could not be parsed to " ".
        data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # `product` was never bound in the obfuscated original.
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 281 | 0 |
from collections import namedtuple

# Per-unit factors: `from_` converts the unit to cubic metres, `to`
# converts cubic metres back to the unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def lowerCamelCase__(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume `value` from `from_type` units to `to_type` units.

    Raises ValueError listing the supported units when a unit name is
    unknown.  (The original joined the characters of the *value* into the
    error message instead of the supported unit names, and used duplicate
    parameter names.)
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Convert to cubic metres, then to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 12 |
from __future__ import annotations
class Node:
    """A binary-tree node holding `data` and optional left/right children.

    Restored name: `main()` below constructs the tree via `Node(...)`.
    """

    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Print every node's data using an in-order traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    """Height of the tree in nodes; an empty tree has depth 0."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    """True iff every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        # Exactly one child makes the tree non-full.
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """Build a sample tree and print its full-ness, depth and contents.

    The obfuscated original never linked the nodes together; the link
    structure below follows the creation order Node(1)..Node(9).
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
| 281 | 0 |
import math

# Project Euler 493: an urn with NUM_COLOURS colours, BALLS_PER_COLOUR
# balls of each colour.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` drawn balls.

    Uses linearity of expectation: each colour is present with probability
    1 - C(60, taken)/C(70, taken).  Returned formatted to 9 decimals.
    """
    total = math.comb(NUM_BALLS, taken)
    # Draws that miss one fixed colour entirely.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
| 13 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """Match the WHOLE `input_string` against `pattern`.

    Supported pattern syntax: '.' matches any single character; '*' matches
    zero or more of the preceding element.  Bottom-up dynamic programming:
    dp[i][j] == 1 iff the first i input characters match the first j
    pattern characters.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.  "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 281 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Logical NOR: 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the NOR-gate truth table."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 14 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
# Pillow is optional: when vision support is unavailable, the tests below
# are skipped and this stub keeps module-level references resolvable.
if is_vision_available():
    from PIL import Image
else:
    class _snake_case :
        # NOTE(review): obfuscated placeholder — presumably stands in for
        # PIL.Image (e.g. its open()); confirm against the upstream test file.
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_a , **_a ):
            pass
def hashimage(image: "Image") -> str:
    """Return the first 10 hex chars of the MD5 of the image's raw bytes.

    Fixes `hashlib.mda` (no such function) to `hashlib.md5`.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: "Image") -> Dict:
    """Shrink a mask to a comparable summary: content hash plus shape."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for the `mask-generation` task (SAM models).

    Method/attribute names restored from obfuscation to the names the
    pipeline-test framework and unittest discover (`model_mapping`,
    `get_test_pipeline`, `test_*`).
    """

    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance plus example inputs for the generic runner."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # The generic batched pipeline test is not applicable here.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["masks"]):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4), [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ], )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["masks"]):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4), [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ], )
| 281 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Tests for VisionTextDualEncoderProcessor (BERT tokenizer + ViT image
    processor).

    Restored from obfuscation: all twelve methods shared one name and the
    `self.tmpdirname` / `self.vocab_file` / `self.image_processor_file`
    attributes were never assigned.
    """

    def setUp(self):
        # Throwaway directory holding a tiny vocab and an image-processor config.
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 15 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case ( datasets.Metric ):
    """Wrapper metric around Google Research's rouge_score implementation.

    Restored from obfuscation: the three module-level strings were all
    bound to one name (leaving `_DESCRIPTION` etc. undefined), and both
    methods shared a name instead of the `_info`/`_compute` hooks that
    `datasets.Metric` dispatches to.
    """

    def _info(self):
        # Declare inputs and point at the reference implementation.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score each prediction/reference pair, optionally bootstrap-aggregating."""
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # Transpose the per-example scores into per-rouge-type lists.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 281 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Greatest sum of any non-empty (possibly non-contiguous) subsequence.

    Raises ValueError on None or an empty sequence.  Restored name: the
    __main__ guard below calls `max_subsequence_sum`.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either take num alone, extend the best-so-far with num, or keep it.
        ans = max(num, ans + num, ans)
    return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
| 16 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode `data` to base64 without the stdlib base64 module.

    Raises TypeError when `data` is not bytes.  Fixes the obfuscated
    original, which compared `data` against its own type (never raising)
    and computed padding from the byte count instead of the bit count.
    """
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    # One long string of bits, 8 per input byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode Base64 *encoded_data* back to bytes, mirroring :func:`base64.b64decode`.

    Accepts an ASCII ``str`` or a bytes object.

    Raises:
        TypeError: If *encoded_data* is neither ``str`` nor ``bytes``.
        ValueError: If a bytes input contains non-ASCII characters.
        AssertionError: If the input contains characters outside the Base64
            alphabet or is incorrectly padded.
    """
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one, and drop the 2 filler bits per '='.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowerCAmelCase(TaskTemplate):
    """Task template describing an image-classification dataset.

    `align_with_features` returns a copy of the template whose ``label_schema``
    uses the dataset's own label feature instead of the generic ``ClassLabel``.
    """

    # `task` keeps its default in `asdict` output so consumers can dispatch on it.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template aligned with *features*.

        Raises:
            ValueError: If the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ instead of setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Map dataset column names to the canonical task column names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 17 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds small RobertaPreLayerNorm configs and dummy inputs for the Flax tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build a config plus (input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape FlaxModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above, plus encoder tensors for cross-attention (decoder mode)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against the RobertaPreLayerNorm models."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # The mixin reads `self.model_tester`; the previous code dropped the result
        # into a local variable and never stored it.
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every model class from the PyTorch checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing model outputs against reference slices."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 281 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece fixture model used to build the test tokenizers below.
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class a__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ALBERT (slow SentencePiece and fast tokenizers)."""

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing.  The module-level constant
        # holding this path cannot be referenced from inside the class (its
        # double-underscore name would be mangled), so resolve it directly.
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"))
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Round-trip a token through `_convert_token_to_id` / `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 3_0000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"), keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"))
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
def lowerCAmelCase_ ( _snake_case : list[list[int | float]] ) -> int:
'''simple docstring'''
__magic_name__ : Any = len(_snake_case )
__magic_name__ : Optional[Any] = len(matrix[0] )
__magic_name__ : Union[str, Any] = min(_snake_case , _snake_case )
for row in range(_snake_case ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _snake_case ):
__magic_name__ : Optional[Any] = matrix[col][row] / matrix[row][row]
for i in range(_snake_case , _snake_case ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__magic_name__ : str = True
for i in range(row + 1 , _snake_case ):
if matrix[i][row] != 0:
__magic_name__ , __magic_name__ : List[str] = matrix[i], matrix[row]
__magic_name__ : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(_snake_case ):
__magic_name__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 281 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds small Roberta configs and dummy inputs for the Flax model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build a config plus (input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape FlaxModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above, plus encoder tensors for cross-attention (decoder mode)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against the Roberta models."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # The mixin reads `self.model_tester`; the previous code dropped the result
        # into a local variable and never stored it.
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every model class from the PyTorch checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 19 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Strips English articles when normalizing answers (read by normalize_answer below).
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
# Parsed command-line options; populated by the __main__ block before main() runs.
OPTS = None
def parse_args():
    """Parse command-line arguments for the SQuAD v2.0 evaluation script.

    Prints the help text and exits when invoked with no arguments.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).",
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id in *dataset* to whether it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Split a normalized answer into whitespace tokens; [] for empty/None input."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Exact-match score (0 or 1) between normalized gold and predicted answers."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 score between gold and predicted answers."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute raw exact-match and F1 scores for every question with a prediction."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Replace the score of any question predicted unanswerable above the threshold."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicted no-answer: score 1 only if the question truly has no answer.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into an OrderedDict of percentage metrics.

    When *qid_list* is given, only those question ids are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from *new_eval* into *main_eval* under '<prefix>_<key>'."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a precision-recall step plot to *out_image*.

    NOTE(review): relies on a module-level `plt` (matplotlib.pyplot), which is
    expected to be imported lazily in the __main__ block — confirm.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer threshold and return the average precision ("ap", in %).

    Optionally saves the precision-recall curve to *out_image*.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR curves for exact/F1/oracle scores and merge their APs into *main_eval*."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle score: 1 for answerable questions, 0 otherwise.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Weight each sample so the bars show the proportion of the dataset.
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, f"""na_prob_hist_{name}.png"""))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer probability threshold that maximizes the aggregate score.

    Returns:
        (best_score_percentage, best_threshold)
    """
    # Baseline: predict no-answer for everything.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Lowering the threshold past this qid loses the no-answer credit
            # whenever the model emitted a non-empty prediction.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in *main_eval*."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main() -> None:
    """Run the SQuAD 2.0-style evaluation configured by the global OPTS.

    Reads dataset and prediction files, scores predictions with the no-answer
    threshold, optionally tunes thresholds and writes diagnostic plots, then
    writes (or prints) the aggregated evaluation dictionary.
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without model no-answer probabilities, treat every question as answerable.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # The evaluation helpers read their configuration from the module-level
    # OPTS namespace, so the parsed arguments must be stored under that name.
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        # Use a non-interactive backend; plots are only written to disk.
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 281 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
# BibTeX entries credited by the metric; consumed as `citation=_CITATION` by the
# Metric class defined below.
_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
    author = \"Popovi{\'c}, Maja\",
    booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
    month = sep,
    year = \"2015\",
    address = \"Lisbon, Portugal\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://aclanthology.org/W15-3049\",
    doi = \"10.18653/v1/W15-3049\",
    pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
    title = \"chr{F}++: words helping character n-grams\",
    author = \"Popovi{\'c}, Maja\",
    booktitle = \"Proceedings of the Second Conference on Machine Translation\",
    month = sep,
    year = \"2017\",
    address = \"Copenhagen, Denmark\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://aclanthology.org/W17-4770\",
    doi = \"10.18653/v1/W17-4770\",
    pages = \"612--618\",
}
@inproceedings{post-2018-call,
    title = \"A Call for Clarity in Reporting {BLEU} Scores\",
    author = \"Post, Matt\",
    booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
    month = oct,
    year = \"2018\",
    address = \"Belgium, Brussels\",
    publisher = \"Association for Computational Linguistics\",
    url = \"https://www.aclweb.org/anthology/W18-6319\",
    pages = \"186--191\",
}
"""
# Human-readable metric description; consumed by the `add_start_docstrings`
# decorator and as `description=_DESCRIPTION` by the Metric class below.
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
# Argument/return documentation with usage examples; consumed by the
# `add_start_docstrings` decorator and as `inputs_description` below.
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision
Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
        >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
        >>> chrf = datasets.load_metric(\"chrf\")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2,
        ...                         lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __snake_case(datasets.Metric):
    """chrF / chrF++ machine-translation metric backed by sacrebleu's CHRF."""

    def _info(self):
        """Declare metric metadata and the expected input features.

        Raises:
            ImportWarning: if the installed sacrebleu is older than 1.4.12.
        """
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Score *predictions* against *references* and return chrF(++) stats.

        Every prediction must have the same number of references.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects references transposed: one list per reference slot.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 20 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece's word-boundary marker (read by the tokenizer tests below) and
# the shared sample-vocabulary fixture used to build test tokenizers.
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case(snake_case, unittest.TestCase):
    """Tokenizer test suite for BigBird (slow and fast SentencePiece tokenizers)."""

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """Build a slow tokenizer from the sample vocab and persist it for reuse."""
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id round trip for a known special token."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """First/last vocab entries and total size match the fixture."""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        """Tokenization, id conversion, and <unk> handling on the sample vocab."""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ], )

    @cached_property
    def big_tokenizer(self):
        """Real pretrained tokenizer used by the slow integration tests."""
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        """Encoded output must feed a BigBird model without errors."""
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        """Decoding keeps the BigBird-specific [MASK] spacing behavior."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510", )
| 281 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowerCamelCase(ConfigTester):
    """Config tester for MobileNetV2.

    MobileNetV2's config has no hidden_size/num_attention_heads, so the common
    properties check is overridden to assert the V2-specific attributes.
    """

    def create_and_test_config_common_properties(self) -> None:
        # NOTE(review): renamed from a generic method name so that
        # ConfigTester.run_common_tests() dispatches to this override — confirm
        # against the upstream test module.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class _lowerCamelCase:
    """Builds small MobileNetV2 configs/inputs and checks each model head's outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=12_80,
        classifier_dropout_prob=0.1,
        initializer_range=0.0_2,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Finegrained output keeps the full head width; otherwise it scales with
        # the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optional) classification/segmentation labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a MobileNetV2 config from the tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Base model: last hidden state and pooler output have expected shapes."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        self.parent.assertEqual(
            result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Classification head: logits are (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Segmentation head: logit map shape with and without labels."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        """Config plus the kwargs dict the common ModelTesterMixin tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileNetV2 (feature extraction, classification,
    segmentation), wired into the shared tester mixins."""

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        # NOTE(review): the two tester classes above are defined in this file but
        # under generic names — confirm these bindings against the upstream module.
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        """forward() must take pixel_values as its first argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        """16 hidden states, requested via kwargs and via config."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class _lowerCamelCase(unittest.TestCase):
    """Slow integration tests running real MobileNetV2 checkpoints on a fixture image."""

    @cached_property
    def default_image_processor(self):
        """Image processor matching the classification checkpoint (None w/o vision)."""
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """Classification logits: shape (1, 1001) and known leading values."""
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_01))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2_4_4_5, -1.1_9_9_3, 0.1_9_0_5]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        """DeepLabV3+MobileNetV2 logits: shape (1, 21, 65, 65) and known corner."""
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[1_7.5_7_9_0, 1_7.7_5_8_1, 1_8.3_3_5_5], [1_8.3_2_5_7, 1_8.4_2_3_0, 1_8.8_9_7_3], [1_8.6_1_6_9, 1_8.8_6_5_0, 1_9.2_1_8_7]],
                [[-2.1_5_9_5, -2.0_9_7_7, -2.3_7_4_1], [-2.4_2_2_6, -2.3_0_2_8, -2.6_8_3_5], [-2.7_8_1_9, -2.5_9_9_1, -2.7_7_0_6]],
                [[4.2_0_5_8, 4.8_3_1_7, 4.7_6_3_8], [4.4_1_3_6, 5.0_3_6_1, 4.9_3_8_3], [4.5_0_2_8, 4.9_6_4_4, 4.8_7_3_4]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
| 21 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Module constants consumed by the tokenizer class attributes below.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's word-boundary marker, used when re-splitting digit pieces.
SPIECE_UNDERLINE = "▁"
class _snake_case ( snake_case ):
    """ALBERT-style SentencePiece tokenizer.

    NOTE(review): throughout this class, results are bound to the throwaway
    name ``__magic_name__`` while later lines read the names the upstream code
    used (``outputs``, ``pieces``, ``state`` ...), and ``__init__`` repeats the
    parameter name ``_a`` (a SyntaxError as written).  The class cannot run in
    this form; the comments below describe the evident intent.
    """

    # Tokenizer metadata consumed by the PreTrainedTokenizer base machinery.
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
        # Loads the SentencePiece model file and records the normalization
        # options (do_lower_case / remove_space / keep_accents).
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __magic_name__ : str = (
            AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
            if isinstance(_a , _a )
            else mask_token
        )
        __magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        __magic_name__ : Dict = do_lower_case
        __magic_name__ : Tuple = remove_space
        __magic_name__ : Union[str, Any] = keep_accents
        __magic_name__ : Tuple = vocab_file
        __magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )

    @property
    def SCREAMING_SNAKE_CASE ( self ):
        # vocab_size: number of pieces known to the SentencePiece model.
        return len(self.sp_model )

    def SCREAMING_SNAKE_CASE ( self ):
        # get_vocab: piece -> id mapping including post-training added tokens.
        __magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # Drop the (unpicklable) SentencePiece processor before pickling.
        __magic_name__ : List[str] = self.__dict__.copy()
        __magic_name__ : Any = None
        return state

    def __setstate__( self , _a ):
        # Restore pickled state, then re-load the SentencePiece model from disk.
        __magic_name__ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __magic_name__ : str = {}
        __magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # preprocess_text: collapse whitespace, normalize LaTeX-style quotes,
        # optionally strip accents (NFKD + drop combining marks) and lower-case.
        if self.remove_space:
            __magic_name__ : List[Any] = " ".join(inputs.strip().split() )
        else:
            __magic_name__ : str = inputs
        __magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            __magic_name__ : str = unicodedata.normalize("NFKD" , _a )
            __magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
        if self.do_lower_case:
            __magic_name__ : int = outputs.lower()
        return outputs

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # _tokenize: SentencePiece-encode the preprocessed text; the loop
        # re-splits pieces ending in "<digit>," so the comma becomes its own piece.
        __magic_name__ : Optional[Any] = self.preprocess_text(_a )
        __magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
        __magic_name__ : Any = []
        for piece in pieces:
            if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                __magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the spurious word-boundary marker introduced by re-encoding.
                    if len(cur_pieces[0] ) == 1:
                        __magic_name__ : List[str] = cur_pieces[1:]
                    else:
                        __magic_name__ : Optional[int] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(_a )
            else:
                new_pieces.append(_a )
        return new_pieces

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # token (piece) -> id via the SentencePiece model.
        return self.sp_model.PieceToId(_a )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # id -> token (piece) via the SentencePiece model.
        return self.sp_model.IdToPiece(_a )

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # convert_tokens_to_string: decode runs of ordinary pieces with the
        # SentencePiece model; special tokens are passed through verbatim.
        __magic_name__ : Any = []
        __magic_name__ : Union[str, Any] = ""
        __magic_name__ : int = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                __magic_name__ : List[Any] = True
                __magic_name__ : Optional[int] = []
            else:
                current_sub_tokens.append(_a )
                __magic_name__ : Optional[Any] = False
        out_string += self.sp_model.decode(_a )
        return out_string.strip()

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # build_inputs_with_special_tokens:
        #   single sequence: [CLS] X [SEP];  pair: [CLS] A [SEP] B [SEP]
        __magic_name__ : List[str] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        # get_special_tokens_mask: 1 marks a special token, 0 a sequence token.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is not None:
            return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1]

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # create_token_type_ids_from_sequences: segment id 0 for the first
        # sequence (plus its specials), segment id 1 for the second.
        __magic_name__ : Optional[int] = [self.sep_token_id]
        __magic_name__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        # save_vocabulary: copy the on-disk model file into save_directory when
        # available, otherwise dump the serialized SentencePiece proto.
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : List[str] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , "wb" ) as fi:
                __magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
| 281 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase_ ( __lowercase ) -> "torch.Tensor":
    """Convert a PIL image to a float32 torch tensor normalized to [-1, 1].

    The image is resized so width and height are integer multiples of 32
    (required by the UNet's downsampling path), scaled from [0, 255] to
    [0, 1], laid out as (1, C, H, W), and finally mapped to [-1, 1].
    """
    image = __lowercase
    # Bug fix: the tuple-unpack targets were obfuscated away, leaving `w` and
    # `h` undefined on the following lines; bind them properly.
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    # Bug fix: `np.floataa` is not a real dtype; float32 is the intended type.
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class A_ ( lowerCAmelCase_ ):
    """Latent-diffusion super-resolution pipeline: VQ-VAE + UNet + scheduler.

    NOTE(review): inside ``__call__`` every intermediate result is bound to
    the throwaway name ``_UpperCAmelCase`` while later lines read the upstream
    names (``batch_size``, ``latents``, ``image`` ...), and ``__init__``
    repeats the parameter name ``snake_case_`` (a SyntaxError as written).
    The comments below describe the evident intent.
    """

    def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        # Register the three sub-modules so the pipeline machinery can
        # save/load and move them between devices.
        super().__init__()
        self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )

    @torch.no_grad()
    def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
        # Super-resolve `image`: concatenate noisy latents with the low-res
        # image each step, denoise with the UNet, then decode via the VQ-VAE.
        if isinstance(snake_case_ , PIL.Image.Image ):
            _UpperCAmelCase = 1
        elif isinstance(snake_case_ , torch.Tensor ):
            _UpperCAmelCase = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' )
        if isinstance(snake_case_ , PIL.Image.Image ):
            _UpperCAmelCase = preprocess(snake_case_ )
        _UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        _UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
        _UpperCAmelCase = next(self.unet.parameters() ).dtype
        _UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
        _UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(snake_case_ , device=self.device )
        _UpperCAmelCase = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        _UpperCAmelCase = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        _UpperCAmelCase = {}
        if accepts_eta:
            _UpperCAmelCase = eta
        for t in self.progress_bar(snake_case_ ):
            # concat latents and low resolution image in the channel dimension.
            _UpperCAmelCase = torch.cat([latents, image] , dim=1 )
            _UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
            # predict the noise residual
            _UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            _UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
        # decode the image latents with the VQVAE
        _UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
        # Map model output from [-1, 1] back to [0, 1] and NHWC numpy layout.
        _UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
        _UpperCAmelCase = image / 2 + 0.5
        _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _UpperCAmelCase = self.numpy_to_pil(snake_case_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=snake_case_ )
| 22 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowerCAmelCase_(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) each sequence to ``sequence_length`` and return lists.

    Args:
        sequences: iterable of sequences; when ``padding_value`` is a tuple
            each element is itself a sequence of pairs (e.g. entity spans).
        padding_value: scalar fill value, or a 2-tuple for pair-valued data.
        padding_side: ``"right"`` pads/truncates at the end, anything else
            pads on the left.
        sequence_length: target length of every output row.

    Returns:
        A nested Python list of shape (len(sequences), sequence_length[, 2]).

    Bug fix vs. the original: the signature repeated one parameter name (a
    SyntaxError) and the output buffer was never bound (``out_tensor`` was
    read but only a throwaway name was assigned), so the function could not
    run. This version builds the buffer explicitly and writes each truncated
    sequence into the correct slice.
    """
    if isinstance(padding_value, tuple):
        # Pair-valued entries get a trailing axis of size 2.
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]  # truncate overlong rows
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            # Left padding: place the data at the end of the row.
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(trimmed) :, :2] = trimmed
            else:
                out_tensor[i, sequence_length - len(trimmed) :] = trimmed
    return out_tensor.tolist()
def lowerCAmelCase_ ( _snake_case : str ) -> bool:
    """Return True if the single character ``_snake_case`` is punctuation.

    Treats the four ASCII ranges !-/ :-@ [-` {-~ as punctuation (they are not
    all category "P" in Unicode), then falls back to the Unicode general
    category check.

    Bug fix vs. the original: the code point and category were bound to a
    throwaway name while the conditions read ``cp`` / ``cat``, raising
    NameError; the return annotation was also wrong (``Tuple`` -> ``bool``).
    """
    cp = ord(_snake_case )
    # ASCII punctuation ranges not uniformly covered by category "P".
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(_snake_case )
    if cat.startswith("P" ):
        return True
    return False
@dataclass
class _snake_case ( snake_case ):
    """Padding collator for LUKE-style token classification: pads labels,
    ``ner_tags`` and ``original_entity_spans`` alongside tokenizer padding.

    NOTE(review): results are bound to the throwaway name ``__magic_name__``
    while later lines read the upstream names (``labels``, ``batch``,
    ``sequence_length`` ...); the collator cannot run as written.  Comments
    describe the evident intent.
    """

    # Obfuscated dataclass fields; presumably: tokenizer (the `42` is a
    # placeholder left by obfuscation), padding strategy, max_length,
    # pad_to_multiple_of, label_pad_token_id, return_tensors -- TODO confirm.
    UpperCamelCase__ = 42
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    UpperCamelCase__ = None
    UpperCamelCase__ = -100
    UpperCamelCase__ = "pt"

    def SCREAMING_SNAKE_CASE ( self , _a ):
        # torch_call: pad token-level features with the tokenizer, then pad
        # labels / ner_tags / original_entity_spans to the entity length.
        import torch

        __magic_name__ : List[str] = "label" if "label" in features[0].keys() else "labels"
        __magic_name__ : Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        __magic_name__ : Optional[int] = self.tokenizer.pad(
            _a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        # Entity sequence length drives how far the label lists are padded.
        __magic_name__ : Dict = torch.tensor(batch["entity_ids"] ).shape[1]
        __magic_name__ : List[Any] = self.tokenizer.padding_side
        if padding_side == "right":
            __magic_name__ : str = [
                list(_a ) + [self.label_pad_token_id] * (sequence_length - len(_a )) for label in labels
            ]
        else:
            __magic_name__ : int = [
                [self.label_pad_token_id] * (sequence_length - len(_a )) + list(_a ) for label in labels
            ]
        __magic_name__ : Dict = [feature["ner_tags"] for feature in features]
        __magic_name__ : List[Any] = padding_tensor(_a , -1 , _a , _a )
        __magic_name__ : Any = [feature["original_entity_spans"] for feature in features]
        __magic_name__ : Any = padding_tensor(_a , (-1, -1) , _a , _a )
        # NOTE(review): torch.intaa is a garbled dtype name (presumably
        # torch.int64) -- confirm against upstream before relying on this.
        __magic_name__ : List[Any] = {k: torch.tensor(_a , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 281 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Builds a tiny RobertaConfig and dummy inputs for the Flax tests below.

    NOTE(review): ``__init__`` repeats the parameter name ``__snake_case`` (a
    SyntaxError as written) and binds every hyper-parameter to the local
    ``UpperCAmelCase`` instead of ``self.<attr>``; the class cannot run in
    this form.  Comments describe the evident intent.
    """

    def __init__( self : List[str] , __snake_case : Tuple , __snake_case : Dict=13 , __snake_case : Optional[int]=7 , __snake_case : Optional[int]=True , __snake_case : Dict=True , __snake_case : Union[str, Any]=True , __snake_case : Dict=True , __snake_case : List[Any]=99 , __snake_case : List[Any]=32 , __snake_case : Union[str, Any]=5 , __snake_case : Optional[int]=4 , __snake_case : Optional[int]=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : int=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=512 , __snake_case : int=16 , __snake_case : List[str]=2 , __snake_case : Dict=0.02 , __snake_case : Tuple=4 , ) -> Union[str, Any]:
        # Record the hyper-parameters of the tiny test model.
        UpperCAmelCase : int = parent
        UpperCAmelCase : Optional[Any] = batch_size
        UpperCAmelCase : List[str] = seq_length
        UpperCAmelCase : List[Any] = is_training
        UpperCAmelCase : Tuple = use_attention_mask
        UpperCAmelCase : Optional[int] = use_token_type_ids
        UpperCAmelCase : Dict = use_labels
        UpperCAmelCase : List[Any] = vocab_size
        UpperCAmelCase : Tuple = hidden_size
        UpperCAmelCase : Any = num_hidden_layers
        UpperCAmelCase : Optional[int] = num_attention_heads
        UpperCAmelCase : List[str] = intermediate_size
        UpperCAmelCase : List[Any] = hidden_act
        UpperCAmelCase : str = hidden_dropout_prob
        UpperCAmelCase : str = attention_probs_dropout_prob
        UpperCAmelCase : List[Any] = max_position_embeddings
        UpperCAmelCase : Any = type_vocab_size
        UpperCAmelCase : List[Any] = type_sequence_label_size
        UpperCAmelCase : List[str] = initializer_range
        UpperCAmelCase : int = num_choices

    def A ( self : List[str] ) -> List[str]:
        # prepare_config_and_inputs: token ids, optional masks, and a
        # decoder-mode RobertaConfig sized from the attributes above.
        UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase : Optional[int] = None
        if self.use_attention_mask:
            UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase : Optional[Any] = None
        if self.use_token_type_ids:
            UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase : str = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def A ( self : Optional[Any] ) -> Optional[int]:
        # prepare_config_and_inputs_for_common: pack inputs into a dict.
        UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
        UpperCAmelCase : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def A ( self : Optional[int] ) -> Union[str, Any]:
        # prepare_config_and_inputs_for_decoder: add encoder hidden states
        # and an encoder attention mask for cross-attention tests.
        UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
        UpperCAmelCase : Optional[Any] = True
        UpperCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
    """Flax Roberta model test entry point: runs the shared Flax test mixin
    over every Roberta head and smoke-tests loading ``roberta-base``.

    NOTE(review): ``setUp`` binds the tester to a throwaway local instead of
    ``self.model_tester`` -- presumably intended as the shared fixture used
    by the inherited mixin tests; confirm against upstream.
    """

    # Run the decoder (causal-LM) variants of the shared tests as well.
    lowerCamelCase__ = True
    # All Flax Roberta head classes exercised by the mixin (empty without flax).
    lowerCamelCase__ = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        # setUp: construct the model tester defined above.
        UpperCAmelCase : str = FlaxRobertaModelTester(self )

    @slow
    def A ( self : Optional[Any] ) -> int:
        # Smoke test: each head loads from the PyTorch checkpoint and runs a
        # minimal forward pass without returning None.
        for model_class_name in self.all_model_classes:
            UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained('''roberta-base''' , from_pt=__snake_case )
            UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__snake_case )
| 23 |
import math
def lowerCAmelCase_ ( x: float , a: float ) -> float:
    """Evaluate f(x) = x**2 - a, whose positive root is sqrt(a).

    Bug fix vs. the original: the signature repeated one parameter name (a
    SyntaxError) and the body read an undefined ``a``; both parameters are
    now named and used.
    """
    return math.pow(x , 2 ) - a
def lowerCAmelCase_ ( x: float ) -> float:
    """Derivative of f(x) = x**2 - a with respect to x, i.e. f'(x) = 2x.

    Bug fix vs. the original: the body read ``x`` but the parameter was named
    ``_snake_case``, raising NameError; the parameter is now named ``x``.
    """
    return 2 * x
def lowerCAmelCase_ ( a: float ) -> float:
    """Return a Newton-iteration seed strictly greater than sqrt(a).

    Starting from 2.0, repeatedly square until the value exceeds ``a``; the
    result then also exceeds sqrt(a), which keeps Newton's method on the
    positive root.

    Bug fix vs. the original: the loop condition read an undefined ``a`` and
    the update squared the (undefined) parameter name instead of ``start``,
    so the loop either crashed or never progressed.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def lowerCAmelCase_ ( a: float , max_iter: int = 9999 , tolerance: float = 0.00_000_000_000_001 ) -> float:
    """Compute sqrt(a) with Newton's method on f(x) = x**2 - a.

    Args:
        a: non-negative number whose square root is wanted.
        max_iter: iteration cap (Newton converges in far fewer steps).
        tolerance: stop once successive iterates differ by less than this.

    Raises:
        ValueError: if ``a`` is negative ("math domain error").

    Bug fix vs. the original: the signature repeated one parameter name (a
    SyntaxError) and the body called helpers (``get_initial_point``, ``fx``,
    ``fx_derivative``) that do not exist under those names in this file; the
    seed search and the Newton update x - f(x)/f'(x) are now inlined so the
    function is self-contained.
    """
    if a < 0:
        raise ValueError("math domain error" )
    # Seed strictly above sqrt(a): square 2.0 until it exceeds a.
    value = 2.0
    while value <= a:
        value = math.pow(value , 2 )
    for _ in range(max_iter ):
        prev_value = value
        # Newton step: x_{n+1} = x_n - (x_n^2 - a) / (2 x_n)
        value = value - (math.pow(value , 2 ) - a) / (2 * value)
        if abs(prev_value - value ) < tolerance:
            return value
    return value
# Script entry point: run this module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 281 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
snake_case_ = logging.get_logger(__name__)

# Map from model identifier to the URL of its hosted configuration file.
snake_case_ = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Configuration class for LXMERT-style cross-modal models: text sizes,
    visual-feature sizes, per-modality layer counts and pretraining-task flags.

    NOTE(review): ``__init__`` repeats the parameter name ``a__`` (a
    SyntaxError as written) and binds every value to the throwaway name
    ``__snake_case`` instead of ``self.<attr>``; the class cannot run in
    this form.  Comments describe the evident intent.
    """

    A_ : Any = 'lxmert'
    A_ : List[Any] = {}

    def __init__(self : int , a__ : Any=3_0522 , a__ : Optional[int]=768 , a__ : Dict=12 , a__ : int=9500 , a__ : Dict=1600 , a__ : Any=400 , a__ : List[str]=3072 , a__ : List[str]="gelu" , a__ : int=0.1 , a__ : Dict=0.1 , a__ : str=512 , a__ : Any=2 , a__ : Any=0.0_2 , a__ : Union[str, Any]=1E-12 , a__ : str=9 , a__ : Optional[Any]=5 , a__ : int=5 , a__ : Optional[int]=2048 , a__ : Union[str, Any]=4 , a__ : Any=6.6_7 , a__ : List[Any]=True , a__ : str=True , a__ : Optional[Any]=True , a__ : Dict=True , a__ : Dict=True , a__ : int=True , a__ : Union[str, Any]=True , **a__ : List[Any] , ):
        """Record text/vision/cross-modal hyper-parameters and task flags."""
        # Text-encoder hyper-parameters.
        __snake_case = vocab_size
        __snake_case = hidden_size
        __snake_case = num_attention_heads
        __snake_case = hidden_act
        __snake_case = intermediate_size
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = max_position_embeddings
        __snake_case = type_vocab_size
        __snake_case = initializer_range
        __snake_case = layer_norm_eps
        # Label-space sizes for the QA / object / attribute heads.
        __snake_case = num_qa_labels
        __snake_case = num_object_labels
        __snake_case = num_attr_labels
        # Per-modality layer counts: language, cross-modal, vision.
        __snake_case = l_layers
        __snake_case = x_layers
        __snake_case = r_layers
        # Visual feature dimensions and loss scaling.
        __snake_case = visual_feat_dim
        __snake_case = visual_pos_dim
        __snake_case = visual_loss_normalizer
        # Pretraining-task on/off switches.
        __snake_case = task_matched
        __snake_case = task_mask_lm
        __snake_case = task_obj_predict
        __snake_case = task_qa
        __snake_case = visual_obj_loss
        __snake_case = visual_attr_loss
        __snake_case = visual_feat_loss
        __snake_case = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**a__ )
| 24 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _snake_case :
    """Builds a tiny LEDConfig and matching dummy inputs for the TF-LED tests.

    NOTE(review): results are bound to the throwaway name ``__magic_name__``
    while later lines read upstream names (``input_ids``, ``outputs`` ...),
    and ``__init__`` repeats the parameter name ``_a`` (a SyntaxError as
    written); the class cannot run in this form.  Comments describe intent.
    """

    # Config class, extra config overrides, and the activation under test.
    UpperCamelCase__ = LEDConfig
    UpperCamelCase__ = {}
    UpperCamelCase__ = 'gelu'

    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=4 , ):
        # Record the hyper-parameters of the tiny test model.
        __magic_name__ : int = parent
        __magic_name__ : Optional[int] = batch_size
        __magic_name__ : Tuple = seq_length
        __magic_name__ : List[Any] = is_training
        __magic_name__ : Dict = use_labels
        __magic_name__ : Optional[Any] = vocab_size
        __magic_name__ : int = hidden_size
        __magic_name__ : Optional[int] = num_hidden_layers
        __magic_name__ : Optional[int] = num_attention_heads
        __magic_name__ : Tuple = intermediate_size
        __magic_name__ : Any = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[str] = max_position_embeddings
        __magic_name__ : Any = eos_token_id
        __magic_name__ : str = pad_token_id
        __magic_name__ : int = bos_token_id
        __magic_name__ : Optional[int] = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        __magic_name__ : Tuple = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        __magic_name__ : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def SCREAMING_SNAKE_CASE ( self ):
        # prepare_config_and_inputs_for_common: build encoder ids ending in
        # EOS, decoder ids, the LEDConfig, and a global-attention mask that
        # marks only the final position.
        __magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __magic_name__ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __magic_name__ : int = tf.concat([input_ids, eos_tensor] , axis=1 )
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : Dict = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        __magic_name__ : List[str] = prepare_led_inputs_dict(_a , _a , _a )
        __magic_name__ : Union[str, Any] = tf.concat(
            [tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
        __magic_name__ : List[Any] = global_attention_mask
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        # check_decoder_model_past_large_inputs: decoding with cached
        # past_key_values must match a full forward pass on the concatenation.
        __magic_name__ : Dict = TFLEDModel(config=_a ).get_decoder()
        __magic_name__ : Optional[int] = inputs_dict["input_ids"]
        __magic_name__ : Union[str, Any] = input_ids[:1, :]
        __magic_name__ : str = inputs_dict["attention_mask"][:1, :]
        __magic_name__ : int = 1
        # first forward pass
        __magic_name__ : Tuple = model(_a , attention_mask=_a , use_cache=_a )
        __magic_name__ , __magic_name__ : str = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __magic_name__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __magic_name__ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __magic_name__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        __magic_name__ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __magic_name__ : List[str] = model(_a , attention_mask=_a )[0]
        __magic_name__ : Dict = model(_a , attention_mask=_a , past_key_values=_a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __magic_name__ : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __magic_name__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
        __magic_name__ : List[str] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def lowerCAmelCase_(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Assemble the keyword-argument dict a TF-LED model expects.

    Any mask left as None is derived from the ids: attention masks are 1
    where the token differs from ``config.pad_token_id`` (the first decoder
    position is always unmasked), and head masks default to all-ones.

    Bug fix vs. the original: the signature repeated one parameter name (a
    SyntaxError), the computed masks were bound to a throwaway local instead
    of the variables returned below, and ``tf.inta`` is not a real dtype
    (restored to ``tf.int8``; confirm against upstream).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # Never mask the decoder start token.
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
    """TF-LED model test suite wiring the tester above into shared mixins.

    NOTE(review): in the attention test below, results are bound to the
    throwaway name ``__magic_name__`` while later lines read the upstream
    names (``config``, ``inputs_dict``, ``out_len`` ...); the test cannot
    run as written.  Comments describe the evident intent.
    """

    # Model classes under test, generation heads, and the pipeline mapping.
    UpperCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    UpperCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    UpperCamelCase__ = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # Mixin switches (encoder-decoder model; no head-masking / onnx / pruning).
    UpperCamelCase__ = True
    UpperCamelCase__ = False
    UpperCamelCase__ = False
    UpperCamelCase__ = False

    def SCREAMING_SNAKE_CASE ( self ):
        # setUp: construct the shared tester and a ConfigTester.
        __magic_name__ : Dict = TFLEDModelTester(self )
        __magic_name__ : List[Any] = ConfigTester(self , config_class=_a )

    def SCREAMING_SNAKE_CASE ( self ):
        # Generic configuration sanity checks.
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self ):
        # Cached-decoding equivalence check via the tester above.
        __magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_a )

    def SCREAMING_SNAKE_CASE ( self ):
        # Attention-output shape checks, including LED's global attentions
        # (first `num_global_attn_indices` positions get global attention).
        __magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ : List[str] = tf.zeros_like(inputs_dict["attention_mask"] )
        __magic_name__ : Optional[Any] = 2
        __magic_name__ : Tuple = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        __magic_name__ : Any = True
        __magic_name__ : str = self.model_tester.seq_length
        __magic_name__ : Dict = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(_a ):
            # Decoder attentions: [heads, seq_length, seq_length] per layer.
            __magic_name__ : str = outputs.decoder_attentions
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(_a ):
            # Encoder local and global attention shape checks.
            __magic_name__ : Any = [t.numpy() for t in outputs.encoder_attentions]
            __magic_name__ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            # Attentions requested via inputs, hidden states off.
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = False
            __magic_name__ : Tuple = False
            __magic_name__ : Optional[int] = model_class(_a )
            __magic_name__ : str = model(self._prepare_for_class(_a , _a ) )
            __magic_name__ : Any = len(_a )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
            if self.is_encoder_decoder:
                __magic_name__ : Tuple = model_class(_a )
                __magic_name__ : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
                self.assertEqual(config.output_hidden_states , _a )
                check_decoder_attentions_output(_a )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __magic_name__ : Dict = True
            __magic_name__ : str = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )
            # Check attention is always last and order is fine
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : Union[str, Any] = True
            __magic_name__ : List[str] = model_class(_a )
            __magic_name__ : Any = model(self._prepare_for_class(_a , _a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
            self.assertEqual(model.config.output_hidden_states , _a )
            check_encoder_attentions_output(_a )

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def SCREAMING_SNAKE_CASE ( self ):
        pass

    def SCREAMING_SNAKE_CASE ( self ):
        # TODO: Head-masking not yet implement
        pass
def lowerCAmelCase_ ( _snake_case ) -> "tf.Tensor":
    """Wrap a (nested) list of token ids in a TensorFlow constant tensor.

    Bug fix vs. the original: ``tf.intaa`` does not exist (AttributeError at
    call time); the garbled dtype is restored to ``tf.int32``, the dtype TF
    integration tests conventionally use for token-id tensors.
    TODO(review): confirm int32 vs. int64 against the upstream test.
    """
    return tf.constant(_snake_case , dtype=tf.int32 )
# Absolute tolerance used by the slow integration tests below when comparing
# model output slices against hard-coded expected values.
snake_case : Optional[int] = 1E-4
@slow
@require_tf
class _snake_case ( unittest.TestCase ):
    """Slow integration tests comparing TF-LED outputs on the real
    ``allenai/led-base-16384`` checkpoint against hard-coded slices.

    NOTE(review): results are bound to the throwaway name ``__magic_name__``
    while later lines read ``model`` / ``output``; the tests cannot run as
    written.  Comments describe the evident intent.
    """

    def SCREAMING_SNAKE_CASE ( self ):
        # Base model (no LM head): check a 3x3 hidden-state slice.
        __magic_name__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        __magic_name__ : Optional[int] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Any = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : List[Any] = model(**_a )[0]
        __magic_name__ : List[str] = (1, 1_024, 768)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : int = tf.convert_to_tensor(
            [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 )

    def SCREAMING_SNAKE_CASE ( self ):
        # With LM head: check a 3x3 logits slice against expected values.
        __magic_name__ : Tuple = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        __magic_name__ : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        __magic_name__ : Optional[Any] = prepare_led_inputs_dict(model.config , _a , _a )
        __magic_name__ : Union[str, Any] = model(**_a )[0]
        __magic_name__ : Optional[int] = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , _a )
        # change to expected output here
        __magic_name__ : str = tf.convert_to_tensor(
            [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
| 281 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
    """CPU-only regression test: an optimizer wrapped by ``Accelerator.prepare``
    must survive a pickle round-trip."""

    def __magic_name__ (self ):
        """Prepare an SGD optimizer with Accelerate, then pickle/unpickle it.

        Bug fix vs. the original: every intermediate was bound to the single
        throwaway name ``SCREAMING_SNAKE_CASE__``, so ``model.parameters()``
        raised NameError and the wrong object was handed to ``prepare`` and
        ``pickle``.  Coherent locals restore the intended flow.
        """
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset global accelerate state so later tests start clean.
        AcceleratorState._reset_state()
| 25 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Optional[Any] = logging.get_logger(__name__)
def lowerCAmelCase_ ( config , base_model=False ):
    """Build the (timm_key, hf_key) rename pairs for a ViT-hybrid checkpoint.

    Fixes the original, which declared two parameters with the same name
    (a SyntaxError) and never bound ``rename_keys`` (NameError).

    Args:
        config: a ``ViTHybridConfig`` -- only ``backbone_config.depths`` and
            ``num_hidden_layers`` are read here.
        base_model: if True, emit keys for the bare ``ViTHybridModel`` (no
            ``vit.`` prefix, pooler instead of classification head).

    Returns:
        List of ``(old_key, new_key)`` tuples.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
        # only block 0 of every stage carries a downsample projection
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on
    return rename_keys


# Backwards-compatible alias matching the name used by the conversion entry point below.
create_rename_keys = lowerCAmelCase_
def lowerCAmelCase_ ( state_dict , config , base_model=False ):
    """Split each timm fused qkv projection into separate HF query/key/value entries.

    Fixes the original, which declared duplicate parameter names (a SyntaxError)
    and computed the q/k/v slices without ever writing them into ``state_dict``.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.{weight,bias}``
    and inserts the per-head projections under the HF ViT naming scheme
    (prefixed with ``vit.`` unless ``base_model`` is True).
    """
    for i in range(config.num_hidden_layers ):
        # the bare base model has no "vit." prefix on its encoder keys
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# Backwards-compatible alias matching the name used by the conversion entry point below.
read_in_q_k_v = lowerCAmelCase_
def lowerCAmelCase_ ( state_dict ):
    """Drop the timm classification-head weights from ``state_dict`` (in place).

    Fixes the original, which never bound ``ignore_keys`` (NameError) and popped
    the function argument instead of each head key. Missing keys are tolerated.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # default of None: do not raise when a checkpoint has no head
        state_dict.pop(k , None )


# Backwards-compatible alias matching the name used by the conversion entry point below.
remove_classification_head_ = lowerCAmelCase_
def lowerCAmelCase_ ( dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` (in place).

    Fixes the original, which declared duplicate parameter names (a SyntaxError)
    and discarded the popped value instead of re-inserting it under the new key.
    """
    val = dct.pop(old )
    dct[new] = val


# Backwards-compatible alias matching the name used by the conversion entry point below.
rename_key = lowerCAmelCase_
def lowerCAmelCase_ ( ):
    """Download and return the standard COCO verification image (two cats on a couch).

    Fixes the original, which bound the URL to a throwaway name and then passed an
    undefined ``_snake_case`` to ``requests.get`` (NameError).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response body
    im = Image.open(requests.get(url , stream=True ).raw )
    return im


# Backwards-compatible alias matching the name used by the conversion entry point below.
prepare_img = lowerCAmelCase_
@torch.no_grad()
def lowerCAmelCase_ ( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a timm ViT-hybrid checkpoint to the HF format and verify it.

    Fixes the original, which declared duplicate parameter names (a SyntaxError)
    and had lost almost every local binding. Downloads the timm weights, renames
    them, checks the outputs of both models agree, then optionally saves and/or
    pushes the converted model and processor.

    Args:
        vit_name: timm model name (e.g. ``vit_base_r50_s16_384``).
        pytorch_dump_folder_path: output directory, or None to skip saving.
        push_to_hub: also upload model + processor to the hub.
    """
    # define default ViT hybrid configuration (BiT backbone feeding a ViT encoder)
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # fetch the ImageNet-1k label mapping used for the classification head
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(f'''ybelkada/{vit_name}''' )
        processor.push_to_hub(f'''ybelkada/{vit_name}''' )


# Backwards-compatible alias matching the name used by the __main__ guard below.
convert_vit_checkpoint = lowerCAmelCase_
if __name__ == "__main__":
    # CLI entry point: pick a timm ViT-hybrid checkpoint and convert it.
    # Fixes the original, which bound the parser/args to `snake_case` and then
    # referenced the undefined names `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase ( UpperCamelCase__,unittest.TestCase ):
    """Test suite for the BarthezTokenizer slow/fast pair (moussaKam/mbarthez).

    NOTE(review): the four class attributes below are all bound to the same name
    `_a`, and the method bodies pass a bare `_a` as argument/expected values --
    those presumably stood for distinct constants (True/False, expected ids, ...)
    before obfuscation; confirm against the upstream test before relying on this.
    """
    _a = BarthezTokenizer
    _a = BarthezTokenizerFast
    _a = True
    _a = True
    # setUp: cache a pretrained fast tokenizer into the mixin's tmp directory.
    def a__ ( self ) -> Tuple:
        super().setUp()
        # NOTE(review): `tokenizer` below is undefined -- presumably the tokenizer
        # loaded on the previous line.
        _A : List[str] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_a )
        _A : List[str] = tokenizer
    # Token <-> id round trip for the <pad> token (expected id 1).
    def a__ ( self ) -> Any:
        _A : Tuple = """<pad>"""
        _A : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
    # First/last vocab entries and total vocab size of 101122.
    def a__ ( self ) -> str:
        # NOTE(review): `vocab_keys` below is undefined -- presumably the list built
        # on the previous line.
        _A : Tuple = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_a ) , 10_1122 )
    def a__ ( self ) -> List[Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
    # Batch tokenization returns pt tensors of shape (2, 6) for the two inputs.
    @require_torch
    def a__ ( self ) -> Tuple:
        _A : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        _A : Optional[int] = [0, 57, 3018, 7_0307, 91, 2]
        _A : List[str] = self.tokenizer(
            _a , max_length=len(_a ) , padding=_a , truncation=_a , return_tensors="""pt""" )
        self.assertIsInstance(_a , _a )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        _A : Any = batch.input_ids.tolist()[0]
        self.assertListEqual(_a , _a )
    # Slow and fast tokenizers must agree on tokenize() and encode().
    def a__ ( self ) -> List[str]:
        if not self.test_rust_tokenizer:
            return
        _A : Union[str, Any] = self.get_tokenizer()
        _A : Tuple = self.get_rust_tokenizer()
        _A : Optional[int] = """I was born in 92000, and this is falsé."""
        _A : Any = tokenizer.tokenize(_a )
        _A : Union[str, Any] = rust_tokenizer.tokenize(_a )
        self.assertListEqual(_a , _a )
        _A : Any = tokenizer.encode(_a , add_special_tokens=_a )
        _A : List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a )
        self.assertListEqual(_a , _a )
        _A : Optional[Any] = self.get_rust_tokenizer()
        _A : Any = tokenizer.encode(_a )
        _A : List[str] = rust_tokenizer.encode(_a )
        self.assertListEqual(_a , _a )
    # Integration test pinning exact encodings for two French sentences.
    @slow
    def a__ ( self ) -> int:
        # fmt: off
        _A : int = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        _A : Any = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_a , )
| 26 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
# Shrink facebook/wmt19-en-de to a ~3MB test model while keeping the full vocab.
# Fixes the original, which bound every value to `snake_case` and then referenced
# the undefined names `mname`, `config`, `tiny_model`, `batch`, `outputs`,
# `mname_tiny` (NameError on every line after the first).
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
# collapse the architecture to the smallest possible dimensions
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test: a forward pass must succeed and produce logits
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 281 | 0 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
# Module-level logger for this metric script.
# NOTE(review): every constant below is bound to the same name `__lowercase`, so each
# assignment shadows the previous one; the metric class below references `logger`,
# `_CITATION`, `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `CHECKPOINT_URLS`, which are
# never defined here -- presumably those were the intended names. Confirm before use.
__lowercase : List[str] = datasets.logging.get_logger(__name__)

# BibTeX citation for the BLEURT paper.
__lowercase : List[str] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'

# Human-readable description shown by `datasets`.
__lowercase : Dict = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'

# Arguments/usage docstring shown by `datasets`.
__lowercase : Optional[int] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'

# Map from checkpoint config name to its download URL.
__lowercase : str = {
    'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
    'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
    'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
    'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
    'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
    'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
    'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
    'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
    'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
    'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    # `datasets.Metric` subclass wrapping Google's BLEURT scorer.
    # NOTE(review): all three methods share the name `__UpperCAmelCase`, so only the
    # last definition survives on the class -- presumably these were `_info`,
    # `_download_and_prepare` and `_compute` before obfuscation; confirm upstream.
    def __UpperCAmelCase ( self ):
        '''Declare the metric signature: string predictions and string references.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
    def __UpperCAmelCase ( self , __a ):
        '''Resolve the checkpoint named by ``self.config_name``, download it and build the scorer.'''
        if self.config_name == "default":
            logger.warning(
                'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
                'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
            # NOTE(review): rebinding the parameter `__a` here looks wrong -- the value
            # is presumably the intended `checkpoint_name` used below; confirm.
            __a : int = 'bleurt-base-128'
        if self.config_name.lower() in CHECKPOINT_URLS:
            __a : str = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            __a : List[str] = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        # NOTE(review): `checkpoint_name` is never defined in this scope -- presumably
        # the values assigned to `__a` above were meant to bind it.
        __a : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        __a : Optional[int] = score.BleurtScorer(os.path.join(__a , __a ) )
    # NOTE(review): duplicate parameter names below are a SyntaxError in Python --
    # upstream this was `_compute(self, predictions, references)`.
    def __UpperCAmelCase ( self , __a , __a ):
        '''Score each candidate against its reference; returns {"scores": [...]}.'''
        __a : Union[str, Any] = self.scorer.score(references=__a , candidates=__a )
        return {"scores": scores}
| 27 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
snake_case : Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase_ ( out , labels ):
    """Count correct predictions: argmax over the class axis of ``out`` vs ``labels``.

    Fixes the original, which declared two parameters with the same name
    (a SyntaxError in Python).

    Args:
        out: array of logits/scores, shape (n_examples, n_classes).
        labels: array of gold class indices, shape (n_examples,).

    Returns:
        Number of rows whose argmax equals the label (numpy integer).
    """
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )


# Backwards-compatible alias matching the name used by the eval loop in main().
accuracy = lowerCAmelCase_
def lowerCAmelCase_ ( dataset_path ):
    """Load a RocStories cloze CSV into (story, cont1, cont2, label) tuples.

    Fixes the original, which bound the csv reader and the output list to
    throwaway names and then iterated/advanced the wrong objects (NameError).

    The label column is shifted from 1-based to 0-based. The header row is
    skipped.
    """
    with open(dataset_path , encoding="utf_8" ) as f:
        f = csv.reader(f )
        output = []
        next(f )  # skip the header line
        for line in tqdm(f ):
            # columns 1-4 are the story sentences; 5/6 the candidate endings
            output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output


# Backwards-compatible alias matching the name used by main().
load_rocstories_dataset = lowerCAmelCase_
def lowerCAmelCase_ ( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    """Turn encoded RocStories examples into padded tensors for the double-heads model.

    Fixes the original, which declared six parameters with the same name (a
    SyntaxError), referenced the never-bound arrays ``input_ids``/``mc_token_ids``/
    ``lm_labels``/``mc_labels`` (NameError), and converted the wrong object inside
    the final ``torch.tensor`` generator.

    Each example is ``(story, cont1, cont2, mc_label)``; both continuations are
    encoded as ``[start] story [delim] cont [clf]``, truncated to ``cap_length``
    per segment and right-padded to ``input_len``. ``lm_labels`` pads with -100
    so padded positions are ignored by the LM loss.

    Returns:
        List with one tuple of tensors per dataset:
        (input_ids, mc_token_ids, lm_labels, mc_labels).
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            # index of the [clf] token, where the multiple-choice head reads
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets


# Backwards-compatible alias matching the name used by main().
pre_process_datasets = lowerCAmelCase_
def lowerCAmelCase_ ( ) -> List[Any]:
    """Fine-tune and/or evaluate an OpenAI GPT double-heads model on RocStories.

    CLI driver: parses arguments, seeds RNGs, prepares the RocStories data,
    optionally trains with AdamW + linear warmup, saves/reloads the model, and
    optionally reports multiple-choice accuracy on the dev set.

    Fixes the original, which used annotated tuple assignments of the form
    ``a , b : T = x, y`` (a SyntaxError), duplicate ``type=_snake_case`` argparse
    types, and had lost nearly every local binding to obfuscation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , default="openai-gpt" , help="pretrained model name" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument("--train_dataset" , type=str , default="" )
    parser.add_argument("--eval_dataset" , type=str , default="" )
    parser.add_argument("--seed" , type=int , default=42 )
    parser.add_argument("--num_train_epochs" , type=int , default=3 )
    parser.add_argument("--train_batch_size" , type=int , default=8 )
    parser.add_argument("--eval_batch_size" , type=int , default=16 )
    parser.add_argument("--adam_epsilon" , default=1e-8 , type=float , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , type=int , default=1 )
    parser.add_argument(
        "--max_steps" , default=-1 , type=int , help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ) , )
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--learning_rate" , type=float , default=6.25e-5 )
    parser.add_argument("--warmup_steps" , default=0 , type=int , help="Linear warmup over warmup_steps." )
    parser.add_argument("--lr_schedule" , type=str , default="warmup_linear" )
    parser.add_argument("--weight_decay" , type=float , default=0.01 )
    parser.add_argument("--lm_coef" , type=float , default=0.9 )
    parser.add_argument("--n_valid" , type=int , default=374 )
    parser.add_argument("--server_ip" , type=str , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=str , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
    print(args )

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()

    # Seed every RNG so runs are reproducible
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device , n_gpu ) )

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True." )

    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )

    # Load and encode the datasets
    def tokenize_and_encode(obj ):
        """Recursively tokenize+encode strings, pass ints through, recurse on sequences."""
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]

    logger.info("Encoding dataset..." )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions )  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        # weight decay is not applied to biases and LayerNorm parameters
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc="Training" )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                # combined loss: weighted LM loss + multiple-choice loss
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # exponential moving average of the loss for the progress bar
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss , scheduler.get_lr()[0] )

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , "module" ) else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )

        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc="Evaluating" ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu" ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir , "eval_results.txt" )
        with open(output_eval_file , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key in sorted(result.keys() ):
                logger.info(" %s = %s" , key , str(result[key] ) )
                writer.write("%s = %s\n" % (key, str(result[key] )) )


# Backwards-compatible alias matching the name called by the __main__ guard below.
main = lowerCAmelCase_


if __name__ == "__main__":
    main()
| 281 | 0 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of perimeters of almost-equilateral triangles.

    An almost-equilateral triangle has integer sides ``(a, a, a ± 1)`` and an
    integer area.  Their side lengths satisfy a linear recurrence, which this
    loop iterates directly instead of scanning all triangles.

    Args:
        max_perimeter: only triangles with perimeter <= this bound count.

    Returns:
        The sum of the qualifying perimeters.

    >>> solution(100)
    66
    """
    # The obfuscated original bound every local to one name, leaving the loop
    # referencing undefined variables; names restored from the loop body.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the recurrence generating successive qualifying triangles.
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 28 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCamelCase(ABC):
    """Abstract reader that builds a dataset from local/remote file paths.

    Stores the common read options; concrete subclasses implement ``read``
    and return a Dataset/DatasetDict (or their iterable variants).

    NOTE(review): the obfuscated original inherited from the undefined name
    ``_snake_case`` and gave every parameter the same name (a SyntaxError);
    restored to the upstream ``AbstractDatasetReader`` shape.
    """

    def __init__(
        self,
        path_or_paths=None,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # A dict of paths maps split names to files, so no default split is
        # forced in that case; otherwise fall back to "train".
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> "Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]":
        """Build and return the dataset described by the stored options."""
class lowerCamelCase(ABC):
    """Abstract reader that builds a dataset from an in-memory input stream.

    Stores the common read options; concrete subclasses implement ``read``
    and return a Dataset (or IterableDataset).

    NOTE(review): the obfuscated original inherited from the undefined name
    ``_snake_case`` and gave every parameter the same name (a SyntaxError);
    restored to the upstream ``AbstractDatasetInputStream`` shape.
    """

    def __init__(
        self,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> "Union[Dataset, IterableDataset]":
        """Build and return the dataset described by the stored options."""
| 29 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """With the ``mockfs`` fixture active, both the "mock" protocol and the
    built-in "bz2" protocol must be registered with fsspec.

    NOTE(review): restored the ``test_`` prefix (pytest does not collect the
    obfuscated name) and the fixture parameter name.
    """
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    """Without the ``mockfs`` fixture, the "mock" protocol must NOT be
    registered, while the built-in "bz2" protocol still is."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """`extract_path_from_uri` strips the remote scheme from an s3 URI and
    leaves a local path untouched.

    NOTE(review): locals were garbled (``mock_bucket``/``dataset_path`` were
    referenced but never bound); names restored from the use sites.
    """
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """A mock (non-local) filesystem is reported remote; the local "file"
    filesystem is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(
    compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file
):
    """Each compression filesystem exposes the decompressed file (extension
    stripped) and reads back the original text content.

    NOTE(review): the parametrize target and all locals were garbled
    (``input_paths``/``reason``/``fs``/``expected_filename`` were referenced
    but never bound); restored, parametrizing over ``COMPRESSION_FILESYSTEMS``
    imported at the top of the file.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # The fixture is None when the optional backend is not installed.
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """A chained fsspec URL (``zip://member::archive``) resolves to a
    filesystem on which the archive member is a file."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """HfFileSystem lists/stats the files of a (private) hub dataset repo and
    reads back the uploaded text content.

    NOTE(review): the four fixture parameter names were garbled; restored to
    the conventional names — confirm against the project's conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """Re-registering an fsspec protocol that `datasets.filesystems` also
    registers must emit exactly one overwrite warning on module reload."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 281 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase__(nn.Module):
    """Flax 2x nearest-neighbor upsampling followed by a 3x3 convolution
    (upstream: ``FlaxUpsample2D``).

    NOTE(review): the obfuscated original named the setup hook ``_lowercase``
    (which flax never calls), bound the conv to a local instead of ``self``,
    and referenced the nonexistent ``jnp.floataa``; all restored.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Binding to self inside setup registers the conv as a submodule.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # hidden_states is NHWC, as required by flax convolutions.
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class lowercase__(nn.Module):
    """Flax strided 3x3 convolution that halves the spatial resolution
    (upstream: ``FlaxDownsample2D``).

    NOTE(review): setup hook name, ``self.conv`` binding and ``jnp.float32``
    restored from the obfuscated original.
    """

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class lowercase__(nn.Module):
    """Flax ResNet block with timestep-embedding injection (upstream:
    ``FlaxResnetBlock2D``): GroupNorm → swish → conv, add projected time
    embedding, GroupNorm → swish → dropout → conv, residual add.

    NOTE(review): the obfuscated original bound every submodule to a local
    name while ``__call__`` read ``self.norm1``/``self.conv1``/…, and gave
    ``__call__`` duplicate parameter names (a SyntaxError); restored.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # A 1x1 shortcut conv is needed when the channel count changes,
        # unless explicitly overridden via use_nin_shortcut.
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        # Broadcast the (batch, channels) embedding over height and width.
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 30 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): these module-level names were mangled by an automated rename;
# both assignments bind the SAME name ``snake_case``, so the logger is
# immediately shadowed by the archive map.  Upstream these are ``logger`` and
# ``CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`` — restore before relying on them.
snake_case : Dict = logging.get_logger(__name__)
# Map of canonical ConvBERT checkpoint names to their hosted config URLs.
snake_case : List[Any] = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case(PretrainedConfig):
    """Configuration class for ConvBERT models.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``snake_case`` and named every ``__init__`` parameter ``_a`` (a
    SyntaxError); restored to the upstream ``ConvBertConfig`` signature,
    whose defaults match the obfuscated ones one-for-one.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # ConvBERT-specific knobs (span-based dynamic convolution).
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class _snake_case(OnnxConfig):
    """ONNX export configuration for ConvBERT.

    NOTE(review): the property must be named ``inputs`` to override the
    ``OnnxConfig`` API (the obfuscated name ``SCREAMING_SNAKE_CASE`` is never
    consulted by the exporter), and ``dynamic_axis`` was referenced but never
    bound; both restored.
    """

    @property
    def inputs(self) -> "Mapping[str, Mapping[int, str]]":
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# NOTE(review): all four module-level names below were mangled to the SAME
# identifier ``__SCREAMING_SNAKE_CASE``, so each assignment shadows the
# previous one.  Upstream these are ``logger``,
# ``WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP``, ``NON_SPEECH_TOKENS`` and
# ``NON_SPEECH_TOKENS_MULTI`` — restore before relying on them.
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)

# Map of canonical Whisper checkpoint names to their hosted config URLs.
__SCREAMING_SNAKE_CASE : Optional[Any] = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}

# fmt: off
# Token ids suppressed during generation (English tokenizer variant).
__SCREAMING_SNAKE_CASE : Tuple = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
# Token ids suppressed during generation (multilingual tokenizer variant).
__SCREAMING_SNAKE_CASE : List[Any] = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class lowerCamelCase_(PretrainedConfig):
    """Configuration class for Whisper speech-to-text models.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``snake_case__`` and named every ``__init__`` parameter ``A`` (a
    SyntaxError); restored to the upstream ``WhisperConfig`` signature, whose
    defaults match the obfuscated ones one-for-one.
    """

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],  # mutable default mirrors upstream transformers
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class lowerCamelCase_(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Whisper.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``snake_case__`` and renamed the API methods to ``_A``; the exporter
    looks up ``inputs``, ``generate_dummy_inputs`` and ``atol_for_validation``
    by name, so those names (and the garbled locals) are restored.
    """

    @property
    def inputs(self) -> "Mapping[str, Mapping[int, str]]":
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        # With a KV cache only the newest decoder token is fed per step.
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: "Union[PreTrainedTokenizerBase, FeatureExtractionMixin]",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ):
        """Build dummy encoder audio features and decoder token ids for export."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        # Looser tolerance than the default: log-mel features are noisy.
        return 1e-3
| 31 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the LAVIS demo image (the Merlion) and return it as an RGB
    PIL image.

    NOTE(review): name restored from the call site below
    (``load_demo_image()``); the garbled locals never bound ``image``.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the (original_key, hf_key) rename pairs for the BLIP-2 vision
    encoder and Q-Former.

    Args:
        config: a BLIP-2 config whose ``vision_config.num_hidden_layers``
            determines how many per-layer entries are produced.

    Returns:
        list[tuple[str, str]] of state-dict key renames.
    """
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    NOTE(review): name restored from the call site below; the garbled
    version assigned the popped value to a throwaway name instead of
    writing it back under ``new``.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Fuse per-layer q/v biases into the single qkv bias HF expects.

    The original checkpoint stores separate ``q_bias`` and ``v_bias`` (with
    no key bias); HF uses one fused ``qkv.bias``, so the key slot is filled
    with zeros.  Mutates ``state_dict`` in place.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    """Build the HF BLIP-2 config (and the image size) for ``model_name``.

    Returns:
        (config, image_size): the composed BLIP-2 config and the input
        resolution (364 for COCO-finetuned checkpoints, else 224).
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert a LAVIS BLIP-2 checkpoint to the Hugging Face format.

    Loads the original LAVIS model, renames its weights into the HF layout,
    verifies that pixel values, logits and a generated caption agree, then
    optionally saves to ``pytorch_dump_folder_path`` and/or pushes to the hub.

    NOTE(review): all locals were garbled (``tokenizer``/``config``/``state_dict``/…
    referenced but never bound); names restored from the use sites.
    """
    # OPT-based checkpoints share the OPT tokenizer; T5-based ones use flan-t5.
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    lavis_name, lavis_model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_name, model_type=lavis_model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''')
        hf_model.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
    # CLI entry point: pick a checkpoint, convert it, optionally save/push.
    # NOTE(review): ``parser``/``choices``/``args`` were garbled to unbound
    # throwaway names; restored from the use sites below.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : Optional[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def SCREAMING_SNAKE_CASE_(height: int, width: int, scale_factor: int = 8):
    """Map an image size onto the UNet latent grid.

    Each dimension becomes ``ceil(dim / scale_factor**2) * scale_factor``,
    i.e. the smallest latent-compatible size that covers the request.

    Fix: the original declared three parameters all named ``__A`` (a
    repeated-argument SyntaxError) and read ``new_height``/``new_width``
    that were bound to the placeholder ``a_``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
    """Kandinsky 2.2 ControlNet decoder pipeline (UNet + DDPM scheduler + MoVQ VAE).

    NOTE(review): throughout this class, assignment targets appear mangled to
    the placeholder ``a_`` while later statements read descriptive names
    (``latents``, ``image_embeds``, ...), and several signatures repeat the
    parameter name ``SCREAMING_SNAKE_CASE__`` — verify against the upstream
    diffusers source before running.
    """
    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : UNetaDConditionModel , SCREAMING_SNAKE_CASE__ : DDPMScheduler , SCREAMING_SNAKE_CASE__ : VQModel , ) -> Optional[int]:
        super().__init__()
        self.register_modules(
            unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , movq=SCREAMING_SNAKE_CASE__ , )
        # Spatial downscale factor of the MoVQ autoencoder: one factor of 2
        # per block beyond the first.
        a_ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    # prepare_latents: draw (or validate) the initial latent noise and scale
    # it by the scheduler's initial sigma.
    def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
        if latents is None:
            a_ : Tuple = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
        else:
            # User-supplied latents must already have the expected shape.
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            a_ : Tuple = latents.to(SCREAMING_SNAKE_CASE__ )
        a_ : int = latents * scheduler.init_noise_sigma
        return latents
    # Sequential CPU offload: page each submodule onto `cuda:{gpu_id}` only
    # while it executes (requires accelerate).
    def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ) -> Any:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        a_ : int = torch.device(F"""cuda:{gpu_id}""" )
        a_ : Tuple = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Whole-model CPU offload with hooks (requires accelerate >= 0.17.0).
    def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 ) -> List[str]:
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        a_ : Any = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=SCREAMING_SNAKE_CASE__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        a_ : Any = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            a_ , a_ : Optional[int] = cpu_offload_with_hook(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prev_module_hook=SCREAMING_SNAKE_CASE__ )
        # We'll offload the last model manually.
        a_ : Union[str, Any] = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
        # Resolve the device the UNet actually executes on when accelerate
        # hooks have moved modules off `self.device`.
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(SCREAMING_SNAKE_CASE__ )
    def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Optional[int]:
        # Denoising loop: image embeddings and a ControlNet hint condition the
        # UNet; classifier-free guidance doubles the batch (negative +
        # positive embeddings concatenated on dim 0).
        a_ : Union[str, Any] = self._execution_device
        a_ : int = guidance_scale > 1.0
        # Lists of embeddings/hints are concatenated into single tensors.
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            a_ : List[Any] = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            a_ : str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            a_ : List[str] = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
        a_ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            a_ : int = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
            a_ : Optional[Any] = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
            a_ : Optional[Any] = hint.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
            a_ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE__ )
            a_ : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE__ )
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
        a_ : List[str] = self.scheduler.timesteps
        a_ : str = self.movq.config.latent_channels
        a_ , a_ : Optional[Any] = downscale_height_and_width(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.movq_scale_factor )
        # create initial latent
        a_ : Tuple = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
            # expand the latents if we are doing classifier free guidance
            a_ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            a_ : Dict = {'image_embeds': image_embeds, 'hint': hint}
            a_ : List[Any] = self.unet(
                sample=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , added_cond_kwargs=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
            if do_classifier_free_guidance:
                # The prediction carries noise and variance stacked on the
                # channel axis: split, apply guidance to the noise half, and
                # re-attach the text-conditioned variance.
                a_ , a_ : str = noise_pred.split(latents.shape[1] , dim=1 )
                a_ , a_ : Optional[Any] = noise_pred.chunk(2 )
                a_ , a_ : Optional[Any] = variance_pred.chunk(2 )
                a_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                a_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler only consumes the noise half in this case.
                a_ , a_ : str = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            a_ : Optional[int] = self.scheduler.step(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , )[0]
        # post-processing
        a_ : Dict = self.movq.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Rescale decoded image from [-1, 1] to [0, 1] before conversion.
            a_ : str = image * 0.5 + 0.5
            a_ : str = image.clamp(0 , 1 )
            a_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            a_ : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
| 32 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
snake_case : Dict = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
snake_case : Union[str, Any] = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def lowerCAmelCase_(_snake_case: str):
    """Return the set of adjacent symbol pairs in a word.

    A "word" is any indexable sequence of symbols (a string, or a tuple of
    BPE sub-tokens); each result element is a ``(prev_symbol, symbol)`` pair.

    Fix: the original bound every intermediate to the placeholder
    ``__magic_name__`` and then read ``pairs``/``prev_char`` (NameError); the
    final line also rebuilt the set from the raw input instead of the
    collected pairs.
    """
    word = _snake_case
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
        """Build the (PhoBERT-style) BPE tokenizer from a vocab file and a merges file.

        NOTE(review): every parameter is mangled to ``_a`` (a repeated-argument
        SyntaxError) and the attribute assignments below target the placeholder
        ``__magic_name__`` instead of ``self.vocab_file`` / ``self.encoder`` /
        ``self.bpe_ranks`` / ``self.cache`` etc. — compare with the upstream
        PhobertTokenizer before running.
        """
        super().__init__(
            bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
        __magic_name__ : Dict = vocab_file
        __magic_name__ : Tuple = merges_file
        # Special tokens get the first four vocabulary ids.
        __magic_name__ : List[Any] = {}
        __magic_name__ : List[Any] = 0
        __magic_name__ : Tuple = 1
        __magic_name__ : int = 2
        __magic_name__ : Union[str, Any] = 3
        self.add_from_file(_a )
        __magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
        # Merge file: one merge rule per line; last (empty) line dropped.
        with open(_a , encoding="utf-8" ) as merges_handle:
            __magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
        __magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        __magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
        __magic_name__ : Optional[int] = {}
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        """Add special tokens: single sequence `<s> X </s>`; pair `<s> A </s></s> B </s>`.

        NOTE(review): both parameters are mangled to ``_a`` (repeated-argument
        SyntaxError) and the body reads ``token_ids_a``/``cls``/``sep``, which
        are never bound here — verify against the upstream tokenizer.
        """
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ : Optional[Any] = [self.cls_token_id]
        __magic_name__ : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
        """Return a mask with 1 at special-token positions and 0 elsewhere.

        NOTE(review): parameters are mangled to ``_a`` and the keyword call
        below repeats ``token_ids_a=`` (SyntaxError) — verify upstream.
        """
        if already_has_special_tokens:
            # Delegate when the ids already contain special tokens.
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is None:
            return [1] + ([0] * len(_a )) + [1]
        return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        """Return token-type ids for a (pair of) sequence(s): all zeros.

        NOTE(review): ``sep``/``cls`` are read below but the assignments
        target the placeholder ``__magic_name__`` — verify upstream.
        """
        __magic_name__ : Optional[Any] = [self.sep_token_id]
        __magic_name__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
    def SCREAMING_SNAKE_CASE ( self , _a ):
        """Byte-pair-encode a single word, returning '@@ '-joined sub-tokens.

        Repeatedly merges the lowest-ranked adjacent pair (per
        ``self.bpe_ranks``) until no known pair remains; results are memoised
        in ``self.cache``.

        NOTE(review): many assignment targets below are the placeholder
        ``__magic_name__`` while reads use ``word``/``pairs``/``bigram``/
        ``new_word``/``i`` — verify against the upstream tokenizer.
        """
        if token in self.cache:
            return self.cache[token]
        __magic_name__ : List[Any] = tuple(_a )
        # Mark the end of the word so end-of-word merges differ from interior ones.
        __magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        __magic_name__ : Any = get_pairs(_a )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the best (lowest) merge rank.
            __magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __magic_name__ , __magic_name__ : List[str] = bigram
            __magic_name__ : List[str] = []
            __magic_name__ : List[str] = 0
            while i < len(_a ):
                try:
                    __magic_name__ : Any = word.index(_a , _a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __magic_name__ : Tuple = j
                if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
                    # Merge the matched pair into a single symbol.
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __magic_name__ : Union[str, Any] = tuple(_a )
            __magic_name__ : Optional[int] = new_word
            if len(_a ) == 1:
                break
            else:
                __magic_name__ : List[Any] = get_pairs(_a )
        # Join sub-tokens with the continuation marker and drop the "</w>" suffix.
        __magic_name__ : Optional[int] = "@@ ".join(_a )
        __magic_name__ : Tuple = word[:-4]
        __magic_name__ : str = word
        return word
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = []
__magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.decoder.get(_a , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
return out_string
    def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
        """Copy the vocab and BPE-merges files into a directory; return the two paths.

        NOTE(review): the parameters are mangled to ``_a`` and the f-string /
        reads use ``save_directory``/``filename_prefix``/``out_vocab_file``/
        ``out_merge_file``, which are never bound here — verify upstream.
        """
        if not os.path.isdir(_a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ : Optional[int] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        __magic_name__ : Union[str, Any] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
        if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
            copyfile(self.merges_file , _a )
        return out_vocab_file, out_merge_file
    def SCREAMING_SNAKE_CASE ( self , _a ):
        """Populate ``self.encoder`` from a fairseq-style '<token> <count>' dictionary.

        Accepts a path (opened and recursed on) or an open file handle.
        NOTE(review): ``self.add_from_file`` and the reads of ``f``/``lines``/
        ``line``/``idx``/``word`` refer to names that are mangled or never
        bound in this file — verify against the upstream tokenizer.
        """
        if isinstance(_a , _a ):
            try:
                with open(_a , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(_a )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        __magic_name__ : List[Any] = f.readlines()
        for lineTmp in lines:
            __magic_name__ : Optional[Any] = lineTmp.strip()
            # The count is the final space-separated field; everything before
            # it is the token.
            __magic_name__ : Union[str, Any] = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            __magic_name__ : Optional[int] = line[:idx]
            __magic_name__ : Dict = len(self.encoder )
| 281 | 0 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__A : int = {'''UserAgent''': UserAgent().random}
def lowercase(__snake_case):
    """Extract the Instagram user dict from a profile-page ``<script>`` tag.

    The tag's text is ``window._sharedData = {...};``; the JSON object is
    sliced out (dropping the trailing ``;``), parsed, and the user record is
    returned from ``entry_data.ProfilePage[0].graphql.user``.

    Fix: the original bound intermediates to the placeholder ``lowercase_``
    and then read ``script``/``data`` (NameError).
    """
    script = __snake_case
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _UpperCAmelCase :
    """Instagram public-profile scraper: fetches a user page and exposes fields as properties.

    NOTE(review): assignment targets in ``__init__``/``get_json`` are the
    placeholder ``lowercase_`` while reads use ``self.url``/``self.user_data``
    /``html``/``scripts``, and ``get_json`` passes an unbound ``A`` and calls
    an ``extract_user_profile`` that is not bound under that name in this
    file — verify against the upstream implementation.
    """
    def __init__( self : List[Any] , A : Any ) -> Any:
        lowercase_ : int = F'''https://www.instagram.com/{username}/'''
        lowercase_ : Union[str, Any] = self.get_json()
    # Fetch the profile page and parse the user record out of its scripts.
    def A ( self : Optional[int] ) -> dict:
        lowercase_ : List[Any] = requests.get(self.url , headers=A ).text
        lowercase_ : List[str] = BeautifulSoup(A , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            # Page layout variant: the data script can also sit at index 3.
            return extract_user_profile(scripts[3] )
    def __repr__( self : int ) -> str:
        return F'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__( self : Dict ) -> str:
        return F'''{self.fullname} ({self.username}) is {self.biography}'''
    # Each property below reads one field of the scraped user record.
    @property
    def A ( self : Dict ) -> str:
        return self.user_data["username"]
    @property
    def A ( self : Any ) -> str:
        return self.user_data["full_name"]
    @property
    def A ( self : int ) -> str:
        return self.user_data["biography"]
    @property
    def A ( self : int ) -> str:
        return self.user_data["business_email"]
    @property
    def A ( self : Union[str, Any] ) -> str:
        return self.user_data["external_url"]
    @property
    def A ( self : List[str] ) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def A ( self : Optional[int] ) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def A ( self : Dict ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def A ( self : List[str] ) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def A ( self : Optional[Any] ) -> bool:
        return self.user_data["is_verified"]
    @property
    def A ( self : List[str] ) -> bool:
        return self.user_data["is_private"]
def lowercase ( __snake_case : str = "github" ):
    """Live smoke test of the scraper against the real 'github' profile (skipped on CI).

    NOTE(review): ``InstagramUser`` / ``instagram_user`` / ``username`` are
    not bound under those names in this mangled file (the class is named
    ``_UpperCAmelCase`` and its accessors ``A``) — verify upstream before
    running; this function also performs network I/O.
    """
    import os
    if os.environ.get('''CI''' ):
        return  # test failing on GitHub Actions
    lowercase_ : Optional[Any] = InstagramUser(__snake_case )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , __snake_case )
    assert instagram_user.username == username
    if username != "github":
        return
    # Expected invariants of the public GitHub profile.
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_5_0
    assert instagram_user.number_of_followers > 1_2_0_0_0_0
    assert instagram_user.number_of_followings > 1_5
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    # Demo entry point: run doctests, then scrape and print the 'github'
    # profile.
    # NOTE(review): ``InstagramUser`` and ``instagram_user`` are not bound
    # under those names in this mangled file (the class is `_UpperCAmelCase`,
    # and the constructor's result is assigned to `__A`) — verify upstream.
    import doctest
    doctest.testmod()
    __A : Tuple = InstagramUser('''github''')
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 33 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( _snake_case : str = "laptop" ) -> DataFrame:
    '''Scrape Amazon.in search results for a product and return them as a DataFrame.

    Columns: title, link, current price, rating, MRP and computed discount %.
    NOTE(review): assignment targets are the placeholder ``__magic_name__``
    while reads use ``product``/``soup``/``data_frame``/``product_title`` etc.
    — verify against the upstream script; also performs network I/O.
    '''
    __magic_name__ : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
    # Desktop user agent so Amazon serves the regular HTML page.
    __magic_name__ : Dict = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    __magic_name__ : Tuple = BeautifulSoup(requests.get(_snake_case , headers=_snake_case ).text )
    # Initialize a Pandas dataframe with the column titles
    __magic_name__ : int = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            __magic_name__ : Dict = item.ha.text
            __magic_name__ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
            __magic_name__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                __magic_name__ : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # Listing has no rating element.
                __magic_name__ : Dict = "Not available"
            try:
                __magic_name__ : Optional[int] = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                # Listing has no MRP (strike-through) price.
                __magic_name__ : List[str] = ""
            try:
                # Discount % = (MRP - price) / MRP * 100, after stripping the
                # currency symbol and thousands separators.
                __magic_name__ : int = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                __magic_name__ : str = float("nan" )
        except AttributeError:
            # Entry is not a real product card; skip it.
            pass
        __magic_name__ : Optional[int] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __magic_name__ : Optional[Any] = " "
        __magic_name__ : str = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Fix: `product` was bound to the placeholder `snake_case`, and
    # `get_amazon_product_data` is never defined — the scraper above is
    # (mangled-)named `lowerCAmelCase_`.
    product = "headphones"
    lowerCAmelCase_(product).to_csv(F"Amazon Product Data for {product}.csv")
| 281 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def snake_case_(_a):
    """Build a per-process 1-D float tensor on the process's device.

    For process ``p`` of ``N``, returns ``[p*N + 1, ..., p*N + N]`` so that
    gathering across processes yields ``1..N**2``.

    Fix: the original read ``state``, which was never bound (the parameter
    is ``_a``).
    """
    state = _a
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def snake_case_ (_a : List[Any] ):
    # Gather each process's tensor; the concatenation must cover 1..N**2.
    # NOTE(review): assignment targets are the placeholder `UpperCAmelCase`
    # while reads use `gathered_tensor`/`state`, and `create_tensor`/`gather`
    # are not bound under those names in this module — verify upstream.
    UpperCAmelCase = create_tensor(_a )
    UpperCAmelCase = gather(_a )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def snake_case_ (_a : str ):
    # Gather one Python object (the rank) per process; the result should
    # enumerate all ranks in order.
    # NOTE(review): placeholder assignments vs. reads of `gathered_obj`/
    # `state`, and `gather_object` is not bound under that name here.
    UpperCAmelCase = [state.process_index]
    UpperCAmelCase = gather_object(_a )
    assert len(_a ) == state.num_processes, F"{gathered_obj}, {len(_a )} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}"
def snake_case_ (_a : Optional[int] ):
    # Broadcast rank 0's tensor; every process should end up with 1..N.
    # NOTE(review): placeholder assignments vs. reads of `broadcasted_tensor`
    # /`state`; `create_tensor`/`broadcast` not bound under those names here.
    UpperCAmelCase = create_tensor(_a )
    UpperCAmelCase = broadcast(_a )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def snake_case_ (_a : Optional[Any] ):
    # Pad every process's tensor to the longest length (main process holds
    # one extra element); non-main tensors get a trailing zero.
    # NOTE(review): placeholder assignments vs. reads of `padded_tensor`/
    # `state`; `pad_across_processes` not bound under that name here.
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        UpperCAmelCase = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        UpperCAmelCase = torch.arange(state.num_processes ).to(state.device )
    UpperCAmelCase = pad_across_processes(_a )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def snake_case_ (_a : Tuple ):
    # Sum-reduce across exactly two processes: [1,2] + [3,4] == [4,6].
    # NOTE(review): placeholder assignments vs. reads of `reduced_tensor`/
    # `truth_tensor`; `create_tensor`/`reduce` not bound under those names.
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    UpperCAmelCase = create_tensor(_a )
    UpperCAmelCase = reduce(_a , '''sum''' )
    UpperCAmelCase = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(_a , _a ), F"{reduced_tensor} != {truth_tensor}"
def snake_case_ (_a : Optional[Any] ):
    # Mean-reduce across exactly two processes: mean([1,2],[3,4]) == [2,3].
    # NOTE(review): placeholder assignments vs. reads of `reduced_tensor`/
    # `truth_tensor`; `create_tensor`/`reduce` not bound under those names.
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    UpperCAmelCase = create_tensor(_a )
    UpperCAmelCase = reduce(_a , '''mean''' )
    UpperCAmelCase = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(_a , _a ), F"{reduced_tensor} != {truth_tensor}"
def snake_case_ (_a : Any ):
    # Entry point for torch-XLA's xla_spawn; simply delegates to the driver.
    # NOTE(review): `main` is not bound under that name in this mangled
    # module (the driver below is also named `snake_case_`).
    # For xla_spawn (TPUs)
    main()
def snake_case_ ():
    # Driver: build the distributed state, then run each collective-op test
    # in turn with a progress banner.
    # NOTE(review): the placeholder assignment vs. read of `state`, and the
    # `test_*` helpers are not bound under those names in this mangled module
    # (every helper above is also named `snake_case_`) — verify upstream.
    UpperCAmelCase = PartialState()
    state.print(F"State: {state}" )
    state.print('''testing gather''' )
    test_gather(_a )
    state.print('''testing gather_object''' )
    test_gather_object(_a )
    state.print('''testing broadcast''' )
    test_broadcast(_a )
    state.print('''testing pad_across_processes''' )
    test_pad_across_processes(_a )
    state.print('''testing reduce_sum''' )
    test_reduce_sum(_a )
    state.print('''testing reduce_mean''' )
    test_reduce_mean(_a )
if __name__ == "__main__":
    # Fix: `main` is never defined in this module — every helper was renamed
    # to `snake_case_`, and the last binding of that name is the driver
    # defined immediately above.
    snake_case_()
| 34 |
from __future__ import annotations
class _snake_case :
def __init__( self , _a ):
__magic_name__ : Optional[Any] = data
__magic_name__ : Node | None = None
__magic_name__ : Node | None = None
def lowerCAmelCase_ ( _snake_case : Node | None ) -> None:  # In Order traversal of the tree
    """Print the tree's node payloads in in-order (left, node, right).

    Fix: the original read the unbound name `tree` and recursed via
    `display`, which is never defined in this module; a local helper keeps
    the recursion self-contained regardless of module-level rebinding.
    """

    def _inorder(node: Node | None) -> None:
        if node:
            _inorder(node.left)
            print(node.data)
            _inorder(node.right)

    _inorder(_snake_case)
def lowerCAmelCase_ ( _snake_case : Node | None ) -> int:
    """Return the depth (height) of the tree; an empty tree has depth 0.

    Fix: the original read the unbound name `tree` and recursed via
    `depth_of_tree`, which is never defined in this module; a local helper
    keeps the recursion self-contained.
    """

    def _depth(node: Node | None) -> int:
        return 1 + max(_depth(node.left), _depth(node.right)) if node else 0

    return _depth(_snake_case)
def lowerCAmelCase_ ( _snake_case : Node ) -> bool:
    """Return True iff every node has either zero or two children (a full binary tree).

    Fix: the original read the unbound name `tree` and recursed via
    `is_full_binary_tree`, which is never defined in this module; a local
    helper keeps the recursion self-contained.
    """

    def _is_full(node: Node | None) -> bool:
        if not node:
            return True
        if node.left and node.right:
            return _is_full(node.left) and _is_full(node.right)
        return not node.left and not node.right

    return _is_full(_snake_case)
def lowerCAmelCase_ ( ) -> None: # Main function for testing.
    """Build a sample 9-node tree, then print fullness, depth and an in-order dump.

    NOTE(review): every assignment below targets the placeholder
    ``__magic_name__`` (so the tree is never wired together), ``Node`` is not
    bound (the node class above is ``_snake_case``), and the helpers
    ``is_full_binary_tree``/``depth_of_tree``/``display`` are not bound under
    those names in this module — verify against the upstream script.
    """
    __magic_name__ : int = Node(1 )
    __magic_name__ : Union[str, Any] = Node(2 )
    __magic_name__ : Tuple = Node(3 )
    __magic_name__ : Optional[Any] = Node(4 )
    __magic_name__ : Union[str, Any] = Node(5 )
    __magic_name__ : Any = Node(6 )
    __magic_name__ : int = Node(7 )
    __magic_name__ : List[str] = Node(8 )
    __magic_name__ : Union[str, Any] = Node(9 )
    print(is_full_binary_tree(_snake_case ) )
    print(depth_of_tree(_snake_case ) )
    print("Tree is: " )
    display(_snake_case )
if __name__ == "__main__":
    # Fix: `main` is never defined in this module — every helper was renamed
    # to `lowerCAmelCase_`, and the last binding of that name is the
    # zero-argument driver defined immediately above.
    lowerCAmelCase_()
| 281 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a ):
    """Configuration for Swin Transformer V2 models (model_type "swinv2").

    NOTE(review): the two class attributes below share the mangled name
    ``lowercase`` (the second overwrites the first), and the ``__init__``
    assignments target ``snake_case__`` instead of ``self.<field>`` — verify
    against the upstream Swinv2Config before running.
    """
    lowercase = "swinv2"
    # Maps canonical config attribute names onto this config's field names.
    lowercase = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self : Any , snake_case_ : int=224 , snake_case_ : List[Any]=4 , snake_case_ : List[Any]=3 , snake_case_ : Optional[Any]=96 , snake_case_ : str=[2, 2, 6, 2] , snake_case_ : Tuple=[3, 6, 12, 24] , snake_case_ : Optional[Any]=7 , snake_case_ : List[str]=4.0 , snake_case_ : Optional[int]=True , snake_case_ : Any=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Any="gelu" , snake_case_ : Optional[Any]=False , snake_case_ : List[str]=0.02 , snake_case_ : Dict=1E-5 , snake_case_ : Optional[int]=32 , **snake_case_ : Dict , ):
        super().__init__(**snake_case_ )
        snake_case__ : Optional[int] = image_size
        snake_case__ : Union[str, Any] = patch_size
        snake_case__ : Optional[int] = num_channels
        snake_case__ : str = embed_dim
        snake_case__ : List[str] = depths
        snake_case__ : int = len(snake_case_ )
        snake_case__ : Union[str, Any] = num_heads
        snake_case__ : Tuple = window_size
        snake_case__ : str = mlp_ratio
        snake_case__ : Optional[Any] = qkv_bias
        snake_case__ : Union[str, Any] = hidden_dropout_prob
        snake_case__ : Dict = attention_probs_dropout_prob
        snake_case__ : Optional[Any] = drop_path_rate
        snake_case__ : Tuple = hidden_act
        snake_case__ : str = use_absolute_embeddings
        snake_case__ : List[str] = layer_norm_eps
        snake_case__ : Optional[int] = initializer_range
        snake_case__ : Dict = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        snake_case__ : List[str] = int(embed_dim * 2 ** (len(snake_case_ ) - 1) )
        snake_case__ : Tuple = (0, 0, 0, 0)
| 35 |
def lowerCAmelCase_(input_string: str, pattern: str) -> bool:
    """Regex-style match of ``input_string`` against ``pattern`` supporting '.' and '*'.

    '.' matches any single character; '*' matches zero or more of the
    preceding element.  Solved bottom-up: ``dp[i][j]`` is 1 iff the first
    ``i`` characters of the string match the first ``j`` characters of the
    pattern.

    Fix: the original declared both parameters as ``_snake_case`` (a
    repeated-argument SyntaxError) and bound every intermediate to the
    placeholder ``__magic_name__`` while reading ``dp`` (NameError).
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' consumes zero occurrences of the preceding element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' consumes one more occurrence of the preceding element.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # Fix: the original bound these to the placeholder `snake_case` and then
    # called `match_pattern`, which is never defined — the matcher above is
    # (mangled-)named `lowerCAmelCase_`.
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(F"{input_string} matches the given pattern {pattern}")
    else:
        print(F"{input_string} does not match with the given pattern {pattern}")
| 281 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCAmelCase_ ( a):
    # Output container for one Karras-VE scheduler step.
    # NOTE(review): the three fields below all carry the same mangled name
    # (`lowerCamelCase__`) and placeholder values; the step methods below
    # construct this type with keywords `prev_sample`, `derivative` and
    # `pred_original_sample` — verify upstream field names before running.
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42
    lowerCamelCase__ = None
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 2
    @register_to_config
    def __init__( self, __a = 0.02, __a = 100, __a = 1.007, __a = 80, __a = 0.05, __a = 50, ):
        '''Store the sigma bounds and churn parameters for the Karras-VE sampler.

        NOTE(review): every parameter is mangled to ``__a`` (repeated-argument
        SyntaxError) and ``sigma_max`` is read but never bound; the three
        placeholder assignments below should initialise the setable state
        (inference steps, timesteps, schedule) — verify upstream.
        '''
        _lowerCAmelCase : Union[str, Any] = sigma_max
        # setable values
        _lowerCAmelCase : int = None
        _lowerCAmelCase : np.IntTensor = None
        _lowerCAmelCase : torch.FloatTensor = None # sigma(t_i)
    def snake_case__ ( self, __a, __a = None):
        '''Identity scaling: this scheduler does not rescale model inputs.

        NOTE(review): returns ``sample``, which is not bound under that name
        here (the parameters are mangled to ``__a``) — verify upstream.
        '''
        return sample
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = num_inference_steps
_lowerCAmelCase : Optional[Any] = np.arange(0, self.num_inference_steps)[::-1].copy()
_lowerCAmelCase : Tuple = torch.from_numpy(__a).to(__a)
_lowerCAmelCase : Any = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_lowerCAmelCase : int = torch.tensor(__a, dtype=torch.floataa, device=__a)
def snake_case__ ( self, __a, __a, __a = None):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase : Any = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
else:
_lowerCAmelCase : str = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase : Any = self.config.s_noise * randn_tensor(sample.shape, generator=__a).to(sample.device)
_lowerCAmelCase : Optional[Any] = sigma + gamma * sigma
_lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def snake_case__ ( self, __a, __a, __a, __a, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sample_hat + sigma_hat * model_output
_lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a, derivative=__a, pred_original_sample=__a)
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = sample_prev + sigma_prev * model_output
_lowerCAmelCase : Optional[int] = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a, derivative=__a, pred_original_sample=__a)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
raise NotImplementedError()
| 36 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
# Import PIL's Image only when the vision extra is installed; otherwise
# install a do-nothing stand-in so module import still succeeds.
if is_vision_available():
    from PIL import Image
else:
    class _snake_case :
        @staticmethod
        def SCREAMING_SNAKE_CASE ( *_a , **_a ):
            # Accept any signature and do nothing (placeholder for the real API).
            pass
def lowerCAmelCase_ ( _snake_case : "Image" ) -> str:
    """Return the first 10 hex chars of the MD5 digest of an image's raw bytes.

    Used by the tests below to shorten mask arrays into stable fingerprints.
    The ``"Image"`` annotation is quoted so the definition does not fail with
    NameError when PIL is unavailable (the stub branch above defines no Image).
    """
    # Fixes: `hashlib.mda` does not exist (intended md5), and the original body
    # read the undefined names `image` and `m` instead of its own parameter.
    digest = hashlib.md5(_snake_case.tobytes() )
    return digest.hexdigest()[:10]
def lowerCAmelCase_ ( _snake_case : "Image" ) -> Dict:
    """Summarize a mask image as ``{"hash": <10-char md5>, "shape": <array shape>}``.

    The ``"Image"`` annotation is quoted so the definition does not fail when
    PIL is unavailable.
    """
    # Fix: the array was bound to a mangled throwaway name while the next line
    # read the undefined name `npimg`; bind it explicitly so `.shape` resolves.
    npimg = np.array(_snake_case )
    shape = npimg.shape
    return {"hash": hashimage(_snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
    """Pipeline tests for the SAM-based "mask-generation" pipeline.

    NOTE(review): this class is obfuscated — results are assigned to the
    mangled name `__magic_name__` while later lines read `outputs` and the
    (typo'd) `new_outupt`; as written those reads raise NameError. Kept
    byte-identical; comments describe apparent intent only.
    """
    # Candidate model mappings under test (PyTorch / TensorFlow).
    UpperCamelCase__ = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    UpperCamelCase__ = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
        """Build a MaskGenerationPipeline plus sample image paths for the harness."""
        # NOTE(review): the pipeline is bound to a mangled name but returned as
        # the undefined `image_segmenter` — TODO confirm intended binding.
        __magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def SCREAMING_SNAKE_CASE ( self , _a , _a ):
        # Per-example run hook intentionally left empty for this pipeline.
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF" )
    def SCREAMING_SNAKE_CASE ( self ):
        pass
    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self ):
        """End-to-end run on a COCO image; compares md5-shortened masks + scores."""
        __magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        __magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Dict = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
            ] , )
        # fmt: on
    @require_torch
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Same pipeline run with a pred_iou_thresh filter; checks the top masks."""
        __magic_name__ : str = "facebook/sam-vit-huge"
        __magic_name__ : str = pipeline("mask-generation" , model=_a )
        __magic_name__ : Tuple = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        __magic_name__ : Any = []
        for i, o in enumerate(outputs["masks"] ):
            new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ] , )
| 281 | 0 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
# The metric class below reads these module-level names (`NLTK_VERSION`,
# `_CITATION`, `_DESCRIPTION`, `_KWARGS_DESCRIPTION`); the original bound every
# value to one throwaway name, leaving all four references undefined at import.
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
    from nltk import word_tokenize
# BibTeX entry surfaced through datasets.MetricInfo.
_CITATION = '''\
@inproceedings{banarjee2005,
  title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month = jun,
  year = {2005},
  address = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url = {https://www.aclweb.org/anthology/W05-0909},
  pages = {65--72},
}
'''
# Human-readable summary shown on the metric card.
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
# Usage documentation appended to the class docstring by add_start_docstrings.
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    \'meteor\': meteor score.
Examples:
    >>> meteor = datasets.load_metric(\'meteor\')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
    """METEOR metric wrapper around nltk.translate.meteor_score.

    NOTE(review): several methods repeat the parameter name `__UpperCAmelCase`,
    which is a SyntaxError in Python, and locals are bound to mangled names —
    kept byte-identical; comments describe apparent intent only.
    """
    def UpperCAmelCase_ ( self ) -> Dict:
        """Declare the metric's input schema and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" ,id="""sequence""" ),
                    """references""": datasets.Value("""string""" ,id="""sequence""" ),
                } ) ,codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] ,reference_urls=[
                """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
                """https://en.wikipedia.org/wiki/METEOR""",
            ] ,)
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[Any]:
        """Download the NLTK corpora required by meteor_score (wordnet, punkt, omw)."""
        import nltk
        nltk.download("""wordnet""" )
        if NLTK_VERSION >= version.Version("""3.6.5""" ):
            nltk.download("""punkt""" )
        if NLTK_VERSION >= version.Version("""3.6.6""" ):
            nltk.download("""omw-1.4""" )
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=0.9 ,__UpperCAmelCase=3 ,__UpperCAmelCase=0.5 ) -> List[Any]:
        """Compute the mean METEOR score over (prediction, reference) pairs."""
        # nltk >= 3.6.5 requires pre-tokenized input, hence the word_tokenize path.
        if NLTK_VERSION >= version.Version("""3.6.5""" ):
            lowerCAmelCase__ : Optional[int] = [
                meteor_score.single_meteor_score(
                    word_tokenize(__UpperCAmelCase ) ,word_tokenize(__UpperCAmelCase ) ,alpha=__UpperCAmelCase ,beta=__UpperCAmelCase ,gamma=__UpperCAmelCase )
                for ref, pred in zip(__UpperCAmelCase ,__UpperCAmelCase )
            ]
        else:
            lowerCAmelCase__ : Dict = [
                meteor_score.single_meteor_score(__UpperCAmelCase ,__UpperCAmelCase ,alpha=__UpperCAmelCase ,beta=__UpperCAmelCase ,gamma=__UpperCAmelCase )
                for ref, pred in zip(__UpperCAmelCase ,__UpperCAmelCase )
            ]
        return {"meteor": np.mean(__UpperCAmelCase )}
| 37 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
snake_case : List[Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
snake_case : Any = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
snake_case : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """ROUGE metric wrapper around Google Research's rouge_score package.

    NOTE(review): `_compute` repeats the parameter name `_a` five times, which
    is a SyntaxError in Python, and results are bound to mangled names while
    later lines read `rouge_types`, `scorer`, `aggregator`, `scores`, `result`.
    Kept byte-identical; comments describe apparent intent only.
    """
    def SCREAMING_SNAKE_CASE ( self ):
        """Declare the metric's input schema and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ] , )
    def SCREAMING_SNAKE_CASE ( self , _a , _a , _a=None , _a=True , _a=False ):
        """Score each (reference, prediction) pair; aggregate or collect per-pair scores."""
        if rouge_types is None:
            # Default to the standard four ROUGE variants.
            __magic_name__ : str = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        __magic_name__ : List[str] = rouge_scorer.RougeScorer(rouge_types=_a , use_stemmer=_a )
        if use_aggregator:
            __magic_name__ : Dict = scoring.BootstrapAggregator()
        else:
            __magic_name__ : str = []
        for ref, pred in zip(_a , _a ):
            __magic_name__ : Union[str, Any] = scorer.score(_a , _a )
            if use_aggregator:
                aggregator.add_scores(_a )
            else:
                scores.append(_a )
        if use_aggregator:
            # Bootstrap-aggregated low/mid/high scores per rouge type.
            __magic_name__ : Any = aggregator.aggregate()
        else:
            # Re-group the per-pair scores by rouge type.
            __magic_name__ : List[Any] = {}
            for key in scores[0]:
                __magic_name__ : str = [score[key] for score in scores]
        return result
| 281 | 0 |
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : dict ) -> None:
    """Kahn's algorithm: print a topological ordering of ``__magic_name__``.

    Args:
        __magic_name__: adjacency list mapping vertex -> list of successors,
            with vertices numbered 0..len-1.

    Prints ``Cycle exists`` instead when the graph is not a DAG.
    """
    # Fix: the original bound every value to a throwaway name while the body
    # read the intended names (graph/indegree/queue/topo/cnt/vertex) — NameError.
    graph = __magic_name__
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    # Count incoming edges per vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Seed the queue with all roots (indegree 0).
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    # If some vertex never reached indegree 0, the graph contains a cycle.
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
# Fix: the original assigned the demo graph to an unrelated name and then
# called the undefined `topological_sort`; bind and call the real names.
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
SCREAMING_SNAKE_CASE_(graph)
| 38 |
# Base64 alphabet; the encode/decode routines below index into this table.
# (Originally bound to an unrelated name, leaving B64_CHARSET undefined.)
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def lowerCAmelCase_ ( _snake_case : bytes ) -> bytes:
    """Encode ``_snake_case`` to its base64 representation (RFC 4648).

    Raises:
        TypeError: if the argument is not a bytes-like object.
    """
    # Fix: the original compared the value against itself in isinstance and
    # converted the whole argument (not each byte) to binary.
    if not isinstance(_snake_case , bytes ):
        msg = F'''a bytes-like object is required, not \'{_snake_case.__class__.__name__}\''''
        raise TypeError(msg )
    # One 8-bit binary string per input byte.
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in _snake_case )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The '=' padding that will be appended to the output later.
        padding = B"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Pad the bit stream with zero bits so its length is a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B""
    # Encode every 6 binary digits to their corresponding Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowerCAmelCase_ ( _snake_case : str ) -> bytes:
    """Decode a base64 ``str`` or ASCII ``bytes`` back to raw bytes.

    Raises:
        TypeError: if the argument is neither ``bytes`` nor ``str``.
        ValueError: if bytes input contains non-ASCII data.
        AssertionError: on invalid characters or incorrect padding (kept as
            ``assert`` to preserve the original exception contract, though a
            ``ValueError`` would normally be preferable — asserts vanish
            under ``python -O``).
    """
    # Fix: the original's isinstance checks compared the value against itself
    # and every local was bound to a throwaway name — NameError throughout.
    if not isinstance(_snake_case , bytes ) and not isinstance(_snake_case , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            F'''not \'{_snake_case.__class__.__name__}\''''
        )
        raise TypeError(msg )
    encoded_data = _snake_case
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one, and drop the corresponding filler bits.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    # Regroup the bit stream into bytes.
    decoded = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 281 | 0 |
from math import pi, sqrt, tan
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
_UpperCAmelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(__lowerCAmelCase , 2 ) * torus_radius * tube_radius
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
_UpperCAmelCase = (sidea + sidea + sidea) / 2
_UpperCAmelCase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __A ( __lowerCAmelCase )-> float:
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __A ( __lowerCAmelCase , __lowerCAmelCase )-> float:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
# Demo: run the doctests, then print the area/surface-area formulas.
# NOTE(review): every function above was renamed to `__A` by obfuscation, so
# the descriptive names called below (area_rectangle, surface_area_cube, ...)
# are undefined here — as written this block raises NameError. Kept
# byte-identical for review.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    print('''[DEMO] Areas of various geometric shapes: \n''')
    print(F'''Rectangle: {area_rectangle(10, 20) = }''')
    print(F'''Square: {area_square(10) = }''')
    print(F'''Triangle: {area_triangle(10, 10) = }''')
    print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
    print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
    print(F'''Rhombus: {area_rhombus(10, 20) = }''')
    print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
    print(F'''Circle: {area_circle(20) = }''')
    print(F'''Ellipse: {area_ellipse(10, 20) = }''')
    print('''\nSurface Areas of various geometric shapes: \n''')
    print(F'''Cube: {surface_area_cube(20) = }''')
    print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
    print(F'''Sphere: {surface_area_sphere(20) = }''')
    print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
    print(F'''Cone: {surface_area_cone(10, 20) = }''')
    print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
    print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
    print(F'''Torus: {surface_area_torus(20, 10) = }''')
    print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
    print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 39 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _snake_case ( unittest.TestCase ):
    """Model tester producing RobertaPreLayerNorm configs and dummy inputs.

    NOTE(review): `__init__` repeats the parameter name `_a` twenty times,
    which is a SyntaxError in Python, and every attribute is assigned to the
    mangled name `__magic_name__` while the right-hand sides read the intended
    parameter names (parent, batch_size, ...). Kept byte-identical; comments
    describe apparent intent only.
    """
    def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
        # Apparent intent: store each constructor argument as a self.* attribute.
        __magic_name__ : List[Any] = parent
        __magic_name__ : Optional[Any] = batch_size
        __magic_name__ : Dict = seq_length
        __magic_name__ : Union[str, Any] = is_training
        __magic_name__ : Optional[Any] = use_attention_mask
        __magic_name__ : Optional[Any] = use_token_type_ids
        __magic_name__ : int = use_labels
        __magic_name__ : List[Any] = vocab_size
        __magic_name__ : Union[str, Any] = hidden_size
        __magic_name__ : Optional[Any] = num_hidden_layers
        __magic_name__ : int = num_attention_heads
        __magic_name__ : Any = intermediate_size
        __magic_name__ : List[Any] = hidden_act
        __magic_name__ : List[Any] = hidden_dropout_prob
        __magic_name__ : Optional[int] = attention_probs_dropout_prob
        __magic_name__ : List[Any] = max_position_embeddings
        __magic_name__ : Tuple = type_vocab_size
        __magic_name__ : List[str] = type_sequence_label_size
        __magic_name__ : Dict = initializer_range
        __magic_name__ : List[Any] = num_choices
    def SCREAMING_SNAKE_CASE ( self ):
        """Build a config plus random input_ids / masks / token_type_ids."""
        __magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ : List[Any] = None
        if self.use_attention_mask:
            __magic_name__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ : str = None
        if self.use_token_type_ids:
            __magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ : List[str] = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def SCREAMING_SNAKE_CASE ( self ):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        __magic_name__ : int = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = config_and_inputs
        __magic_name__ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def SCREAMING_SNAKE_CASE ( self ):
        """Variant for decoder tests: adds encoder hidden states and mask."""
        __magic_name__ : Optional[int] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = config_and_inputs
        __magic_name__ : Tuple = True
        __magic_name__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _snake_case ( snake_case , unittest.TestCase ):
    """Common-mixin test suite for all Flax RobertaPreLayerNorm model heads.

    NOTE(review): locals are bound to the mangled `__magic_name__` while later
    lines read other names, and `from_pt=_a` references an undefined name —
    kept byte-identical; comments describe apparent intent only.
    """
    # Whether the mixin should test head masking (presumably) — TODO confirm.
    UpperCamelCase__ = True
    # All model classes exercised by the shared mixin, when flax is installed.
    UpperCamelCase__ = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def SCREAMING_SNAKE_CASE ( self ):
        """Instantiate the model tester defined above for the shared mixin."""
        __magic_name__ : Optional[Any] = FlaxRobertaPreLayerNormModelTester(self )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Smoke-test from_pretrained + a forward pass for every model class."""
        for model_class_name in self.all_model_classes:
            __magic_name__ : Optional[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
            __magic_name__ : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_a )
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow integration tests pinning exact output slices of the pretrained model.

    NOTE(review): obfuscated like the classes above (mangled local bindings,
    undefined `_a`, and `jnp.intaa` / `np.floataa` look like mangled
    int64/float32 dtypes). Kept byte-identical.
    """
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Masked-LM head: check logits shape and a 3x3 slice against references."""
        __magic_name__ : Dict = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Union[str, Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : List[str] = model(_a )[0]
        __magic_name__ : str = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , _a )
        # compare the actual values for a slice.
        __magic_name__ : List[str] = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
    @slow
    def SCREAMING_SNAKE_CASE ( self ):
        """Base model: check a 3x3 slice of the last hidden state against references."""
        __magic_name__ : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=_a )
        __magic_name__ : Tuple = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __magic_name__ : Tuple = model(_a )[0]
        # compare the actual values for a slice.
        __magic_name__ : Dict = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 281 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
# Bind the module logger under the name `logger`: the builder below calls
# `logger.error(...)`, but the scrambled original bound it only to `__lowercase`,
# which would raise NameError at runtime.  The old name is kept as an alias.
logger = datasets.utils.logging.get_logger(__name__)
__lowercase = logger
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    NOTE(review): restored from the mangled ``_A`` — the builder class below
    already references ``ParquetConfig`` by this name.  The field names are
    likewise restored because the builder reads ``self.config.batch_size``,
    ``self.config.columns`` and ``self.config.features``; the scrambled
    version named all three fields ``UpperCAmelCase``, collapsing them into a
    single dataclass field.
    """

    # Rows per Arrow record batch when iterating each parquet file.
    batch_size: int = 10_000
    # Optional subset of columns to load; must match `features` when both are set.
    columns: Optional[List[str]] = None
    # Optional explicit schema; inferred from the first file's schema when None.
    features: Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
    """Dataset builder that streams Arrow tables out of local Parquet files.

    NOTE(review): the scrambled original defined every method under the same
    name ``__snake_case`` so only the last definition survived; the methods
    are restored to the hook names required by ``datasets.ArrowBasedBuilder``.
    Several mangled variable references (``__UpperCAmelCase`` used in place of
    ``data_files``/``files``/``file``/``f``/``e``) are repaired as well.
    """

    # Hook attribute the `datasets` framework reads to construct `self.config`.
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """Expose the (optional) user-provided features as the dataset info."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Resolve ``config.data_files`` into one SplitGenerator per split.

        When no explicit features were given, the schema of the first file is
        used to infer ``self.info.features``.
        """
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # Single unnamed split: treat everything as TRAIN.
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        # Store on self.info (the original dropped this into a local).
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a raw table to ``self.info.features`` (no-op when features is None)."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(key, pa.Table)`` pairs, one per record batch per file."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A requested column subset must match the declared features exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table)
                except ValueError as e:
                    # `type(e)` replaces the original's mangled `type(__UpperCAmelCase)`.
                    logger.error(f'''Failed to read file \'{file}\' with error {type(e)}: {e}''')
                    raise
| 40 |
def lowerCAmelCase_ ( _snake_case : list[list[int | float]] ) -> int:
'''simple docstring'''
__magic_name__ : Any = len(_snake_case )
__magic_name__ : Optional[Any] = len(matrix[0] )
__magic_name__ : Union[str, Any] = min(_snake_case , _snake_case )
for row in range(_snake_case ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _snake_case ):
__magic_name__ : Optional[Any] = matrix[col][row] / matrix[row][row]
for i in range(_snake_case , _snake_case ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__magic_name__ : str = True
for i in range(row + 1 , _snake_case ):
if matrix[i][row] != 0:
__magic_name__ , __magic_name__ : List[str] = matrix[i], matrix[row]
__magic_name__ : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(_snake_case ):
__magic_name__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when executed
    # directly as a script.
    import doctest
    doctest.testmod()
| 281 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.