"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
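# ----------------------------------------------------------------------
# For reference, a minimal sketch of a server-side `send_file` that would
# satisfy every assertion above. This is an illustration only, not the
# actual `file_transfer.send_file` module: the function name, port, chunk
# size, and shutdown mode are assumptions, and handling of the `testing`
# flag is omitted.
import socket


def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    port = 12312  # arbitrary port chosen for the sketch
    sock = socket.socket()
    sock.bind(("localhost", port))
    sock.listen(5)
    conn, _ = sock.accept()
    conn.recv(1024)  # wait for the client's request before sending
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()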
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
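# ----------------------------------------------------------------------
# A usage sketch, assuming a local Spark session; the DataFrame contents
# and the cache directory are made up for illustration:
#
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], schema=["text"])
#     dataset = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/spark_cache").read()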
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCAmelCase : Optional[Any] ="""<<<<<<< This should probably be modified because it mentions: """
__lowerCAmelCase : Optional[Any] ="""=======
>>>>>>>
"""
__lowerCAmelCase : Tuple =[
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
__lowerCAmelCase : Any =[
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def UpperCAmelCase__ ( lowerCAmelCase__ :Namespace ) -> Dict:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _A ( lowerCAmelCase ):
@staticmethod
def A__ ( __lowerCAmelCase ):
"""simple docstring"""
lowercase = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=__lowerCAmelCase )
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
"""simple docstring"""
lowercase = get_logger("""datasets-cli/converting""" )
lowercase = tfds_path
lowercase = datasets_directory
def A__ ( self ):
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
lowercase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
lowercase = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
lowercase = []
lowercase = []
lowercase = {}
if os.path.isdir(self._tfds_path ):
lowercase = os.listdir(__lowerCAmelCase )
else:
lowercase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
lowercase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
lowercase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
lowercase = f.readlines()
lowercase = []
lowercase = False
lowercase = False
lowercase = []
for line in lines:
lowercase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowercase = """"""
continue
elif "from absl import logging" in out_line:
lowercase = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowercase = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase = True
lowercase = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + """\n""" )
out_lines.append(__lowerCAmelCase )
out_lines.append(__lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , __lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowercase = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase = True
out_lines.append(__lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase = f_name.replace(""".py""" , """""" )
lowercase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
lowercase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.writelines(__lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
lowercase = os.path.basename(__lowerCAmelCase )
lowercase = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
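# ----------------------------------------------------------------------
# Usage sketch. The command is normally reached through the CLI
# (`datasets-cli convert --tfds_path ... --datasets_directory ...`), but it
# can also be driven programmatically; the paths below are hypothetical:
#
#     command = ConvertCommand(tfds_path="tfds/text/imdb.py", datasets_directory="converted_datasets/")
#     command.run()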
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCAmelCase__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("""doctest""").testmod()
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowercase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase__ )
lowercase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase = 1_0_2_4
lowercase = 4_0_9_6
lowercase = 2_4
lowercase = 1_6
lowercase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = False
lowercase = """relu"""
lowercase = 1_0_2_4
lowercase = True
lowercase = False
lowercase = False
# load HuggingFace model
lowercase = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
lowercase = TrOCRForCausalLM(lowerCAmelCase__ )
lowercase = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )["""model"""]
lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowerCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowercase = val
else:
lowercase = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
lowercase = ViTImageProcessor(size=encoder_config.image_size )
lowercase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowercase = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
lowercase = outputs.logits
lowercase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
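# ----------------------------------------------------------------------
# After conversion, the saved model can run OCR through the standard
# generate API. A sketch (the dump folder is whatever was passed above;
# the image is any RGB text-line image):
#
#     model = VisionEncoderDecoderModel.from_pretrained(args.pytorch_dump_folder_path)
#     processor = TrOCRProcessor.from_pretrained(args.pytorch_dump_folder_path)
#     pixel_values = processor(images=prepare_img(args.checkpoint_url), return_tensors="pt").pixel_values
#     generated_ids = model.generate(pixel_values)
#     print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])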
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] ) -> List[str]:
'''simple docstring'''
lowercase = os.path.abspath(lowerCAmelCase__ )
logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
lowercase = tf.train.list_variables(lowerCAmelCase__ )
lowercase = []
lowercase = []
lowercase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(f'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase = name[1:]
# figure out how many levels deep the name is
lowercase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase__ )
# read data
lowercase = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
names.append("""/""".join(lowerCAmelCase__ ) )
arrays.append(lowerCAmelCase__ )
logger.info(f'Read a total of {len(lowerCAmelCase__ ):,} layers' )
# Sanity check
if len(set(lowerCAmelCase__ ) ) != 1:
raise ValueError(f'Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})' )
lowercase = list(set(lowerCAmelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = full_name.split("""/""" )
lowercase = model
lowercase = []
for i, m_name in enumerate(lowerCAmelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
lowercase = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
lowercase = getattr(lowerCAmelCase__ , """embeddings""" )
lowercase = getattr(lowerCAmelCase__ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
lowercase = getattr(lowerCAmelCase__ , """encoder""" )
lowercase = getattr(lowerCAmelCase__ , """layer""" )
lowercase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
lowercase = getattr(lowerCAmelCase__ , """pooler""" )
lowercase = getattr(lowerCAmelCase__ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
lowercase = getattr(lowerCAmelCase__ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
lowercase = getattr(lowerCAmelCase__ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
lowercase = getattr(lowerCAmelCase__ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
lowercase = getattr(lowerCAmelCase__ , """token_type_embeddings""" )
else:
raise ValueError(f'Unknown embedding layer with name {full_name}' )
trace.append("""weight""" )
lowercase = getattr(lowerCAmelCase__ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
lowercase = getattr(lowerCAmelCase__ , """attention""" )
lowercase = getattr(lowerCAmelCase__ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
lowercase = getattr(lowerCAmelCase__ , """attention""" )
lowercase = getattr(lowerCAmelCase__ , """output""" )
lowercase = getattr(lowerCAmelCase__ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
lowercase = getattr(lowerCAmelCase__ , """attention""" )
lowercase = getattr(lowerCAmelCase__ , """output""" )
lowercase = getattr(lowerCAmelCase__ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
lowercase = getattr(lowerCAmelCase__ , """output""" )
lowercase = getattr(lowerCAmelCase__ , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
lowercase = getattr(lowerCAmelCase__ , """output""" )
lowercase = getattr(lowerCAmelCase__ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
lowercase = getattr(lowerCAmelCase__ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
lowercase = getattr(lowerCAmelCase__ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
lowercase = getattr(lowerCAmelCase__ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
lowercase = getattr(lowerCAmelCase__ , """intermediate""" )
lowercase = getattr(lowerCAmelCase__ , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
lowercase = getattr(lowerCAmelCase__ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
lowercase = getattr(lowerCAmelCase__ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
lowercase = getattr(lowerCAmelCase__ , """weight""" )
else:
logger.warning(f'Ignored {m_name}' )
# for certain layers reshape is necessary
lowercase = """.""".join(lowerCAmelCase__ )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowerCAmelCase__ ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , lowerCAmelCase__ ):
lowercase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase = array.transpose()
if pointer.shape == array.shape:
lowercase = torch.from_numpy(lowerCAmelCase__ )
else:
raise ValueError(
f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
f' {array.shape}' )
logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
logger.info(f'Loading model based on config from {config_path}...' )
lowercase = BertConfig.from_json_file(lowerCAmelCase__ )
lowercase = BertModel(lowerCAmelCase__ )
# Load weights from checkpoint
logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
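# ----------------------------------------------------------------------
# The script saves a bare state_dict, so loading it back requires the same
# config. A sketch (file names below are the ones passed on the command line):
#
#     config = BertConfig.from_json_file("bert_config.json")
#     model = BertModel(config)
#     model.load_state_dict(torch.load("pytorch_model.bin"))
#     model.eval()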
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
lowercase = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
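# An alternative that stays in integer arithmetic, so it is exact even for
# very large n where the float-based cube root above can still drift:
def perfect_cube_binary_search(n: int) -> bool:
    """
    >>> perfect_cube_binary_search(27)
    True
    >>> perfect_cube_binary_search(4)
    False
    """
    if n < 0:
        n = -n  # a negative number is a cube iff its absolute value is
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid**3 == n:
            return True
        if mid**3 < n:
            left = mid + 1
        else:
            right = mid - 1
    return False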
"""PyTorch - Flax general utilities."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
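# ----------------------------------------------------------------------
# A quick illustration of what `rename_key` does to PyTorch-style module
# paths before the dot-split (the key below is made up):
#
#     rename_key("down_blocks.0.attentions.1.proj.weight")
#     # -> "down_blocks_0.attentions_1.proj.weight"
#
# Numeric list indices become name suffixes, which matches how Flax
# flattens submodules.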
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__lowerCAmelCase : Optional[Any] ="""scheduler_config.json"""
class _A ( lowerCAmelCase ):
snake_case__ : str = 1
snake_case__ : int = 2
snake_case__ : str = 3
snake_case__ : List[Any] = 4
snake_case__ : Optional[int] = 5
snake_case__ : str = 6
snake_case__ : int = 7
snake_case__ : List[str] = 8
snake_case__ : Any = 9
snake_case__ : Tuple = 10
snake_case__ : str = 11
snake_case__ : List[str] = 12
snake_case__ : List[str] = 13
snake_case__ : Optional[Any] = 14
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : torch.FloatTensor
class _A :
snake_case__ : int = SCHEDULER_CONFIG_NAME
snake_case__ : str = []
snake_case__ : Tuple = True
@classmethod
def A__ ( cls , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase , lowercase , lowercase = cls.load_config(
pretrained_model_name_or_path=__lowerCAmelCase , subfolder=__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , return_commit_hash=__lowerCAmelCase , **__lowerCAmelCase , )
return cls.from_config(__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False , **__lowerCAmelCase ):
"""simple docstring"""
self.save_config(save_directory=__lowerCAmelCase , push_to_hub=__lowerCAmelCase , **__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def A__ ( cls ):
"""simple docstring"""
lowercase = list(set([cls.__name__] + cls._compatibles ) )
lowercase = importlib.import_module(__name__.split(""".""" )[0] )
lowercase = [
getattr(__lowerCAmelCase , __lowerCAmelCase ) for c in compatible_classes_str if hasattr(__lowerCAmelCase , __lowerCAmelCase )
]
return compatible_classes
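# ----------------------------------------------------------------------
# Usage sketch for the mixin's entry points (the repo id is illustrative):
#
#     from diffusers import DDIMScheduler
#
#     scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#     print(scheduler.compatibles)  # every scheduler class sharing this config
#     scheduler.save_pretrained("./my_scheduler")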
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
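# ----------------------------------------------------------------------
# These arguments are typically populated from the command line through
# HfArgumentParser. A sketch (the flag values are examples):
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(Seq2SeqTrainingArguments)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         ["--output_dir", "out", "--predict_with_generate", "--label_smoothing", "0.1"]
#     )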
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
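# ----------------------------------------------------------------------
# With the _LazyModule in place, heavyweight submodules are imported only
# on first attribute access, e.g.:
#
#     from transformers.models.vit import ViTConfig  # cheap: config module only
#     from transformers.models.vit import ViTModel   # triggers the torch-backed module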
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[
dim : dim * 2, :
]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
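# ----------------------------------------------------------------------
# Once converted, the model behaves like CLIP for zero-shot image/text
# matching. A sketch reusing the objects from `convert_groupvit_checkpoint`:
#
#     probs = outputs.logits_per_image.softmax(dim=1)  # per-text probabilities
#     print(probs)  # high probability expected for "a photo of a cat" on the COCO cats image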
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCAmelCase : Dict ={"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _A ( unittest.TestCase ):
snake_case__ : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case__ : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case__ : Tuple = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowercase = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] )
lowercase = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
lowercase = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
# Legacy behavior
lowercase = text_classifier("""This is great !""" , return_all_scores=__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowercase = text_classifier("""This is great !""" , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] )
lowercase = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
lowercase = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
] , )
@require_torch
def A__ ( self ):
"""simple docstring"""
import torch
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@slow
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""text-classification""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
lowercase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowercase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
@slow
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""text-classification""" , framework="""tf""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
lowercase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowercase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = TextClassificationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowercase = """HuggingFace is in"""
lowercase = text_classifier(__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
lowercase = ["""HuggingFace is in """, """Paris is in France"""]
lowercase = text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}, {"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowercase = text_classifier(__lowerCAmelCase , top_k=__lowerCAmelCase )
lowercase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] * N, [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] * N] , )
lowercase = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
lowercase = text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead, as it was producing wrong outputs.
lowercase = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(__lowerCAmelCase ):
text_classifier(__lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowercase = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
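# Output contract exercised above (added as a summary, based on the pipeline's
# documented behaviour): the default top_k=1 returns one {"label", "score"}
# dict per input, top_k=None returns every class's score, and the deprecated
# return_all_scores flag is the legacy way of asking for all scores.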
| 360
|
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(sources , int ):
lowercase = [sources]
if isinstance(sinks , int ):
lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# add a fake super-source / super-sink vertex
# when there is more than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
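# Note on the fake-vertex trick above (added for clarity): when there are
# several sources or sinks, the network is reduced to a single-source,
# single-sink one by prepending a super-source that feeds every real source
# with capacity max_input_flow and appending a super-sink fed by every real
# sink; the maximum flow value is unchanged.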
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set a maximum flow algorithm first.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't mutate
# it in your algorithms; make a deep copy first if you need to
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute the algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# push the initial preflow out of the source
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, move it to the front
# and restart the scan from index 0
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
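# Hand-checked expectation (added): the only augmenting path in the graph
# above is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck --
# and therefore the maximum flow -- should be 6.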
| 32
| 0
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = credit_card_number
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowercase = int(cc_number[i] )
digit *= 2
# If doubling a digit results in a two-digit number,
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 18: 1 + 8 = 9)
# to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowercase = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
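# Hedged worked example (added): for "59" the loop doubles the 5 into 10,
# whose digit sum is 1 + 0 = 1; the untouched check digit 9 brings the total
# to 10, and 10 % 10 == 0, so luhn_validation("59") returns True.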
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCAmelCase__ ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 361
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase = parser.parse_args()
return args
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
try:
if args.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
return tpu
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
for file in file_list:
lowercase = file.split("""/""" )[-1]
lowercase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCAmelCase__ ).group(1 )
lowercase = int(lowerCAmelCase__ )
num_samples += sample_count
return num_samples
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
lowercase = count_samples(lowerCAmelCase__ )
lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase = dataset.shuffle(args.shuffle_buffer_size )
lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
lowercase = dataset.prefetch(lowerCAmelCase__ )
return dataset
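# Pipeline note (added): shard filenames are shuffled before any records are
# read, and decoded samples are shuffled again in a fixed-size buffer, so
# both shard order and within-shard order vary across epochs.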
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
lowercase = initialize_tpu(lowerCAmelCase__ )
lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase = tokenizer.vocab_size
lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase = count_samples(lowerCAmelCase__ )
lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase = create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCAmelCase__ :Any ):
lowercase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCAmelCase__ :Dict ):
# TF really needs an isin() function
lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase , lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
return batch
lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =parse_args()
main(args)
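# Hedged invocation sketch (added; flag names match the parser above, while
# the script name and gs:// paths are placeholders):
# python run_mlm_tpu.py --tokenizer unigram-tokenizer-wikitext \
# --train_dataset gs://bucket/train --eval_dataset gs://bucket/eval \
# --output_dir ./out --bfloat16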
| 32
| 0
|
"""simple docstring"""
import math
import unittest
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
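# Why 6k +/- 1 suffices (added): every integer is 6k + r with r in
# {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} is even and r == 3 is divisible by 3,
# so any prime above 3 has r == 1 or r == 5. Checking only i and i + 2 as
# trial divisors therefore skips two thirds of the candidates.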
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def A__ ( self ):
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero is not prime; primes must have exactly two positive factors.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 362
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCAmelCase : Optional[Any] ={"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 363
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
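# Hedged usage sketch (added; the class is obfuscated to _A above -- in the
# original library it is the Mask2Former config class):
# config = _A() # falls back to the default Swin backbone config
# d = config.to_dict() # the nested backbone config is re-serialised too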
| 32
| 0
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> str:
'''simple docstring'''
return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int]="attention" ) -> Optional[Any]:
'''simple docstring'''
lowercase = lowercase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
lowercase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
lowercase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
lowercase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
lowercase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
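# Shape note (added; assumes the scanned T5X layout with the layer axis in
# position 1): slicing layer i leaves 3-D kernels whose head and head-dim
# axes are then merged, giving the 2-D matrices that are transposed into
# PyTorch Linear weights further down.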
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :str=False ) -> str:
'''simple docstring'''
if split_mlp_wi:
lowercase = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
lowercase = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
lowercase = (wi_a, wi_a)
else:
lowercase = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
lowercase = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> List[str]:
'''simple docstring'''
return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def UpperCAmelCase__ ( lowerCAmelCase__ :dict , *, lowerCAmelCase__ :int , lowerCAmelCase__ :bool , lowerCAmelCase__ :bool = False ) -> Union[str, Any]:
'''simple docstring'''
lowercase = traverse_util.flatten_dict(variables["""target"""] )
lowercase = {"""/""".join(lowerCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase__ )
lowercase = collections.OrderedDict()
# Shared embeddings.
lowercase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (MLP).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , lowerCAmelCase__ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase = tax_relpos_bias_lookup(
lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" ).T
lowercase = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
lowercase = tax_relpos_bias_lookup(
lowerCAmelCase__ , 0 , """encoder""" ).T
lowercase = tax_relpos_bias_lookup(
lowerCAmelCase__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """self_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (Cross Attention).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 2 (MLP).
lowercase = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , lowerCAmelCase__ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase = tax_relpos_bias_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" ).T
lowercase = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase = old["""decoder/logits_dense/kernel"""].T
return new
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :bool ) -> Tuple:
'''simple docstring'''
lowercase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowercase = state_dict["""shared.weight"""]
return state_dict
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = checkpoints.load_tax_checkpoint(lowerCAmelCase__ )
lowercase = convert_tax_to_pytorch(
lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ , scalable_attention=lowerCAmelCase__ )
lowercase = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]:
'''simple docstring'''
lowercase = MTaConfig.from_json_file(lowerCAmelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase = UMTaEncoderModel(lowerCAmelCase__ )
else:
lowercase = UMTaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from the T5X checkpoint
load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase__ )
print("""Done""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__lowerCAmelCase : str =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 364
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
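# Hedged worked example (added): with s = "a.w.w", old = ".w",
# new = ".weight" and count = 1, rsplit gives ["a.w", ""] and joining with
# ".weight" yields "a.w.weight" -- only the last occurrence is replaced.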
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32
| 0
|
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCAmelCase : Any ={
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _A ( lowerCAmelCase ):
snake_case__ : str = 'facebook/nllb-200-distilled-600M'
snake_case__ : str = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ : Optional[int] = 'translator'
snake_case__ : int = AutoTokenizer
snake_case__ : str = AutoModelForSeqaSeqLM
snake_case__ : List[Any] = LANGUAGE_CODES
snake_case__ : Optional[Any] = ['text', 'text', 'text']
snake_case__ : List[str] = ['text']
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'{tgt_lang} is not a supported language.' )
lowercase = self.lang_to_code[src_lang]
lowercase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__lowerCAmelCase , return_tensors="""pt""" , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.model.generate(**__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCAmelCase )
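# Hedged usage sketch (added; the tool class is obfuscated to _A above -- in
# the original library it is the NLLB translation tool):
# translator = _A()
# translator("Comment allez-vous ?", src_lang="French", tgt_lang="English")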
| 365
|
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
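# Hedged demo (added; forceWrite is referenced above, and the colour writer's
# original name is assumed to be writeColor):
# writeColor("error", 31) # 31 is the ANSI SGR code for red text
# forceWrite("\n")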
| 32
| 0
|
"""simple docstring"""
import numpy as np
def UpperCAmelCase__ ( lowerCAmelCase__ :np.array ) -> np.array:
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
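# Identity used above (added): 2 / (1 + exp(-2x)) - 1 == tanh(x), which is
# also 2 * sigmoid(2x) - 1; it maps 0 -> 0 and saturates at -1 and +1.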
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
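# Hedged worked example (added): for 382 the one-digit deletions are
# 82, 32 and 38, so the function above returns 82.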
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] ) -> Dict:
'''simple docstring'''
if openai_config_file == "":
lowercase = OpenAIGPTConfig()
else:
lowercase = OpenAIGPTConfig.from_json_file(lowerCAmelCase__ )
lowercase = OpenAIGPTModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
lowercase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowercase = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
__lowerCAmelCase : str =parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
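# Hedged invocation sketch (added; flag names match the parser above, while
# the script name and paths are placeholders):
# python convert_openai_checkpoint.py \
# --openai_checkpoint_folder_path /path/to/openai/ckpt \
# --pytorch_dump_folder_path ./openai-gpt-pt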
| 367
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
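# Rotation used above (added): [[cos t, -sin t], [sin t, cos t]] turns a
# vector counter-clockwise by t degrees once t is converted to radians;
# e.g. rotate(numpy.array([1, 0]), 90) is approximately [0, 1].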
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[Any] = ['transformers', 'torch', 'note_seq']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
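# Hedged note (added): dummies like this keep the package importable when
# optional backends are missing; requires_backends only raises an error with
# an installation hint once the class is actually instantiated or its
# constructors are called, not at import time.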
| 368
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = credit_card_number
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowercase = int(cc_number[i] )
digit *= 2
# If doubling a digit results in a two-digit number,
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 18: 1 + 8 = 9)
# to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowercase = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCAmelCase__ ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 32
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> int:
'''simple docstring'''
lowercase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase = 1_2_8
elif "12-12" in model_name:
lowercase = 1_2
lowercase = 1_2
elif "14-14" in model_name:
lowercase = 1_4
lowercase = 1_4
elif "16-16" in model_name:
lowercase = 1_6
lowercase = 1_6
else:
raise ValueError("""Model not supported""" )
lowercase = """huggingface/label-files"""
if "speech-commands" in model_name:
lowercase = 3_5
lowercase = """speech-commands-v2-id2label.json"""
else:
lowercase = 5_2_7
lowercase = """audioset-id2label.json"""
lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict ) -> Tuple:
'''simple docstring'''
if "module.v" in name:
lowercase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
lowercase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
lowercase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
lowercase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
lowercase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
lowercase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
lowercase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any=False ) -> Optional[int]:
'''simple docstring'''
lowercase = get_audio_spectrogram_transformer_config(lowerCAmelCase__ )
lowercase = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
lowercase = model_name_to_url[model_name]
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" )
# remove some keys
remove_keys(lowerCAmelCase__ )
# rename some keys
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
# load 🤗 model
lowercase = ASTForAudioClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    lowercase = -4.2677393 if """speech-commands""" not in model_name else -6.845978
    lowercase = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
    lowercase = 1024 if """speech-commands""" not in model_name else 128
lowercase = ASTFeatureExtractor(mean=lowerCAmelCase__ , std=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
if "speech-commands" in model_name:
lowercase = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
lowercase = dataset[0]["""audio"""]["""array"""]
else:
lowercase = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
lowercase , lowercase = torchaudio.load(lowerCAmelCase__ )
lowercase = waveform.squeeze().numpy()
lowercase = feature_extractor(lowerCAmelCase__ , sampling_rate=1_6_0_0_0 , return_tensors="""pt""" )
# forward pass
lowercase = model(**lowerCAmelCase__ )
lowercase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowerCAmelCase : str =parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
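For reference, a hypothetical direct invocation of the converter (a sketch: the dump folder is a placeholder, and the positional order simply mirrors the argparse call above):
convert_audio_spectrogram_transformer_checkpoint(
    "ast-finetuned-audioset-10-10-0.4593",  # model_name, one of the keys in the URL mapping above
    "./ast-converted",                      # pytorch_dump_folder_path (placeholder)
    False,                                  # push_to_hub
)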
| 369
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(pipe.scheduler , __lowerCAmelCase )
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowercase = 40_0366_0346
lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowercase = 27_3497_1755
lowercase = 7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowercase = 10_4435_5234
lowercase = 12
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
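The three nightly tests above differ only in the safe-latent-diffusion knobs passed to the pipeline. A minimal, hedged inference sketch (model id and parameter values copied from the tests; the prompt and output path are placeholders):
import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
image = pipe(
    "a portrait photograph",        # placeholder prompt
    generator=torch.manual_seed(0),
    guidance_scale=7,
    num_inference_steps=50,
    sld_guidance_scale=2000,        # 0 disables safety guidance entirely
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]
image.save("out.png")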
| 32
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
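For reference, a hedged sketch of the two-stage flow this slow test exercises: the prior turns the prompt into image embeddings, which the inpaint pipeline consumes together with the image and mask (model ids and sizes from the test; the image content and masked region are placeholders).
import numpy as np
import torch
from PIL import Image
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
inpainter = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")
init_image = Image.new("RGB", (768, 768))     # placeholder; the test loads a cat photo
mask = np.ones((768, 768), dtype=np.float32)  # the test zeroes out one region of this mask
image_emb, zero_image_emb = prior("a hat", negative_prompt="").to_tuple()
result = inpainter(
    "a hat",
    image=init_image,
    mask_image=mask,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    output_type="np",
).images[0]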
| 370
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
    if any(len(item) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
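A hand verification of the expected output (a sketch assuming the solver behaves as intended): summing all five rows of `eq` gives 6*(x1 + ... + x5) = 30, so the variables sum to 5; row i then reads (x1 + ... + x5) + x_i = rhs_i, hence x_i = rhs_i - 5.
assert solve_simultaneous(eq) == [-1.0, 0.0, 1.0, 2.0, 3.0]
assert solve_simultaneous([[4, 2]]) == [0.5]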
| 32
| 0
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
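A minimal usage sketch: in the released transformers API the classmethod above is exposed as `Mask2FormerConfig.from_backbone_config` (the name is an assumption here, since this dump obfuscates method names), and the backbone values below are illustrative.
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(depths=[2, 2, 18, 2], out_features=["stage1", "stage2", "stage3", "stage4"])
config = Mask2FormerConfig.from_backbone_config(backbone)
assert config.backbone_config.model_type == "swin"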
| 371
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 32
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A : Any = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], A, module_spec=__spec__)
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], A, module_spec=__spec__)
| 33
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : Optional[Any] = logging.get_logger(__name__)
A : Any = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class __A( a , a ):
snake_case_ = '''resnet'''
snake_case_ = ['''basic''', '''bottleneck''']
def __init__( self , _snake_case=3 , _snake_case=64 , _snake_case=[256, 512, 1_024, 2_048] , _snake_case=[3, 4, 6, 3] , _snake_case="bottleneck" , _snake_case="relu" , _snake_case=False , _snake_case=None , _snake_case=None , **_snake_case , ) -> int:
'''simple docstring'''
super().__init__(**_snake_case )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
__a = num_channels
__a = embedding_size
__a = hidden_sizes
__a = depths
__a = layer_type
__a = hidden_act
__a = downsample_in_first_stage
__a = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_snake_case ) + 1 )]
__a , __a = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-3
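A minimal instantiation sketch (all values illustrative) exercising the fields defined above, laid out ResNet-50 style:
from transformers import ResNetConfig

config = ResNetConfig(
    depths=[3, 4, 6, 3],
    hidden_sizes=[256, 512, 1024, 2048],
    layer_type="bottleneck",
    out_features=["stage4"],  # expose only the last stage as a backbone feature
)
assert config.layer_type in config.layer_types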
| 33
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
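A quick usage check with the names as restored above: points on the line y = x, z = 0 are collinear, and nudging one z-coordinate breaks collinearity (the cross product no longer rounds to the zero vector).
assert are_collinear((0, 0, 0), (1, 1, 0), (2, 2, 0))
assert not are_collinear((0, 0, 0), (1, 1, 0), (2, 2, 1))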
| 33
| 1
|
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # closed form for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
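Worked check (not part of the original file): with first_term=1, common_diff=1 and num_of_terms=10 the closed form gives 10/2 * (2*1 + 9*1) = 5 * 11 = 55.
assert sum_of_series(1, 1, 10) == 55.0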
| 33
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
A : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Dict:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = '''weight'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> int:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> Tuple:
if config_path is not None:
__a = UniSpeechSatConfig.from_pretrained(a__ )
else:
__a = UniSpeechSatConfig()
__a = ''''''
if is_finetuned:
__a = UniSpeechSatForCTC(a__ )
else:
__a = UniSpeechSatForPreTraining(a__ )
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__a = model[0].eval()
recursively_load_weights(a__ , a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
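A hypothetical direct call (all paths are placeholders), matching the argparse invocation above:
convert_unispeech_sat_checkpoint(
    "/path/to/unispeech_sat.pt",    # fairseq checkpoint (placeholder)
    "./unispeech-sat-converted",    # output folder for the converted model
    None,                           # config_path: fall back to a default UniSpeechSatConfig
    "/path/to/dict.ltr.txt",        # dict_path: its parent dir becomes fairseq's `data` override
    True,                           # is_finetuned -> convert the CTC head
)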
| 33
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __A( a ):
snake_case_ = '''convbert'''
def __init__( self , _snake_case=30_522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=1 , _snake_case=0 , _snake_case=2 , _snake_case=768 , _snake_case=2 , _snake_case=9 , _snake_case=1 , _snake_case=None , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case , )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = embedding_size
__a = head_ratio
__a = conv_kernel_size
__a = num_groups
__a = classifier_dropout
class __A( a ):
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
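A minimal instantiation sketch (all values illustrative) for the ConvBERT-specific fields above:
from transformers import ConvBertConfig, ConvBertModel

config = ConvBertConfig(vocab_size=30522, conv_kernel_size=9, head_ratio=2, num_groups=1)
model = ConvBertModel(config)
print(model.config.conv_kernel_size)  # 9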
| 33
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A( a ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a = bertabert.config.encoder.vocab_size
__a = tokenizer.sep_token_id
__a = tokenizer.cls_token_id
__a = 128
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__a = train_dataset.select(range(32 ) )
__a = val_dataset.select(range(16 ) )
__a = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__a = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
__a = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
__a = inputs.input_ids
__a = inputs.attention_mask
__a = outputs.input_ids
__a = outputs.input_ids.copy()
__a = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__a = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids )
            assert all(len(x) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
__a = pred.label_ids
__a = pred.predictions
# all unnecessary tokens are removed
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
            __a = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
__a = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__a = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__a = self.get_auto_remove_tmp_dir()
__a = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__a = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
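Stripped of the Trainer plumbing, the warm-start recipe at the top of this test is the reusable part; a hedged sketch (checkpoint ids and token-id wiring taken from the test's assignments):
from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "prajjwal1/bert-tiny", "prajjwal1/bert-tiny"
)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.config.vocab_size = model.config.encoder.vocab_size
model.config.eos_token_id = tokenizer.sep_token_id
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.max_length = 128  # generation length used during evaluation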
| 33
| 1
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
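As a quick check on the recurrence (not in the original file): with array [1, 2, 5] and target 5, f(0)=1, f(1)=1, f(2)=2, f(3)=3, f(4)=5 and f(5) = f(4) + f(3) + f(0) = 9 ordered combinations.
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9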
| 33
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : str = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], A, module_spec=__spec__)
| 33
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None ) -> int:
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(a__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __A:
snake_case_ = OPTConfig
snake_case_ = {}
snake_case_ = '''gelu'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 33
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` only returns the fields that differ from the defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
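# Quick illustration (added; `DistributedDataParallelKwargs` is the real
# accelerate handler, but treat the exact defaults as an assumption):
# `to_kwargs()` only emits fields that differ from the dataclass defaults, so
#
#     DistributedDataParallelKwargs(bucket_cap_mb=15).to_kwargs()
#
# returns {"bucket_cap_mb": 15} and nothing else.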
| 33
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
A : Any = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
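# Usage sketch (added; assumes the transformers tools/agents runtime — the call
# signature mirrors `encode` above, with languages given in plain English):
#
#     tool = TranslationTool()
#     print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))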
| 33
|
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the words in words.txt whose letter values sum to a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
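# Worked example (added): "SKY" scores ord("S")-64 + ord("K")-64 + ord("Y")-64
# = 19 + 11 + 25 = 55, and 55 = 10*11/2 is the 10th triangular number, so "SKY"
# counts as a triangular word.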
| 33
| 1
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple parsed from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to a string, e.g. (1, 2, 3) -> '1.2.3'."""
    return ".".join(str(v) for v in version_tuple)
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
| 1
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Get the image warped by the affine transform mapping the three points `pt1` onto `pt2`."""
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
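# Note (added): cv2.getAffineTransform solves for the unique 2x3 matrix that
# maps three source points onto three destination points; a quick sanity check
# is that mapping a triangle onto itself yields (numerically) the identity:
#
#     m = cv2.getAffineTransform(pts1, pts1)
#     assert np.allclose(m, np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]))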
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
| 1
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Decode each of the three heads (char, bpe, wp) separately and keep the
        highest-confidence string per example."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char eos token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe eos token id
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece eos token id
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        """Char-level decode: strip the spaces the char tokenizer inserts between symbols."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Wordpiece decode: strip the spaces the wp tokenizer inserts between subwords."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
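# Usage sketch (added; the checkpoint id is the published MGP-STR base model,
# but treat the exact names as assumptions):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)  # an MgpstrForSceneTextRecognition model
#     text = processor.batch_decode(outputs.logits)["generated_text"]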
| 33
|
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(key)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter back by the matching key letter (mod 26)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt: shift each letter forward by the matching key letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
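# Worked example of the key expansion above (added): for the 17-character
# message "THE GERMAN ATTACK" and key "SECRET", generate_key returns the key
# cycled out to the message length, "SECRETSECRETSECRE". The index `i` in
# cipher_text/original_text only advances on letters, so spaces in the message
# pass through unencrypted and do not consume key characters.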
| 33
| 1
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt
    are sent sequentially and merged back together in complete_code."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns true if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task and group them by task_id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 33
|
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
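# Example invocation (added; the script filename is hypothetical — use whatever
# this file is saved as):
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/roberta_6L.pth --vocab_transform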
| 33
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class __A( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_snake_case )[0]
__a = [1, 6, 768]
self.assertEqual(output.shape , _snake_case )
__a = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _snake_case , atol=1E-4 )
| 33
|
import os
import numpy
import onnx
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = a.name
__a = b.name
__a = ''''''
__a = ''''''
__a = a == b
__a = name_a
__a = name_b
return res
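# Worked note: the two TensorProtos above are compared with their `name`
# fields blanked, so identically-valued initializers registered under
# different names still compare equal; the original names are restored
# before returning.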
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(a__ , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
for n in graph_proto.node:
_node_replace_input_with(a__ , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Union[str, Any]:
__a = list(model.graph.initializer )
__a = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__a = inits[i].name
__a = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# rewrite every node input that still references the removed duplicate initializer
_graph_replace_input_with(model_without_ext.graph , a__ , a__ )
def __lowerCAmelCase ( a__ ) -> str:
__a = os.path.dirname(a__ )
__a = os.path.basename(a__ )
__a = onnx.load(os.path.join(a__ , a__ ) )
__a = list(model.graph.initializer )
__a = set()
__a = {}
__a = []
__a = 0
for i in range(len(a__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(a__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(a__ )
dup_set.add(a__ )
__a = inits[j].data_type
__a = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , a__ )
total_reduced_size += mem_size
__a = inits[i].name
__a = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(a__ )
else:
__a = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
__a = sorted(a__ )
_remove_dup_initializers_from_model(a__ , a__ , a__ )
__a = '''optimized_''' + model_file_name
__a = os.path.join(a__ , a__ )
onnx.save(a__ , a__ )
return new_model
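# Hedged usage sketch. This dump renames every top-level function to
# `__lowerCAmelCase`, so after import only the last definition survives: the
# entry point above, which takes the path of an .onnx file, strips duplicate
# initializers, and saves an '''optimized_''' copy alongside it. The path is
# hypothetical:
#
# optimized_path = __lowerCAmelCase('''/tmp/encoder.onnx''')  # -> /tmp/optimized_encoder.onnx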
| 33
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
A : Union[str, Any] = logging.getLogger(__name__)
class __A( a ):
snake_case_ = '''masked_bert'''
def __init__( self , _snake_case=30_522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="topK" , _snake_case="constant" , _snake_case=0.0 , **_snake_case , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_snake_case , **_snake_case )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
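# Hedged note: `__A` above is this file's masked-BERT config (model type
# '''masked_bert'''). In the upstream, de-obfuscated source the constructor
# keywords are vocab_size, hidden_size, ..., pruning_method ('''topK''' by
# default), mask_init and mask_scale; the dump collapses them all to
# `_snake_case`, so only a default construction is safe to sketch:
#
# config = __A()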
| 33
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Dict = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __A:
def __init__( self , _snake_case , ) -> Union[str, Any]:
'''simple docstring'''
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = True
__a = True
__a = False
__a = False
__a = False
__a = 2
__a = 99
__a = 0
__a = 32
__a = 2
__a = 4
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = '''last'''
__a = True
__a = None
__a = 0
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__a = None
if self.use_input_lengths:
__a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Tuple:
'''simple docstring'''
__a = TFFlaubertModel(config=_snake_case )
__a = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
__a = model(_snake_case )
__a = [input_ids, input_mask]
__a = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Union[str, Any]:
'''simple docstring'''
__a = TFFlaubertWithLMHeadModel(_snake_case )
__a = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[str]:
'''simple docstring'''
__a = TFFlaubertForQuestionAnsweringSimple(_snake_case )
__a = {'''input_ids''': input_ids, '''lengths''': input_lengths}
__a = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> int:
'''simple docstring'''
__a = TFFlaubertForSequenceClassification(_snake_case )
__a = {'''input_ids''': input_ids, '''lengths''': input_lengths}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> int:
'''simple docstring'''
__a = self.num_labels
__a = TFFlaubertForTokenClassification(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[Any]:
'''simple docstring'''
__a = self.num_choices
__a = TFFlaubertForMultipleChoice(config=_snake_case )
__a = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__a = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class __A( a , a , unittest.TestCase ):
snake_case_ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case_ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
snake_case_ = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = TFFlaubertModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , emb_dim=37 )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFFlaubertModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_tf
@require_sentencepiece
@require_tokenizers
class __A( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
__a = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__a = model(_snake_case )[0]
__a = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice.
__a = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 33
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
A : str = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
A : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCAmelCase ( ) -> Tuple:
__a = '''https://pypi.org/pypi/diffusers/json'''
__a = json.loads(request.urlopen(a__ ).read() )['''releases'''].keys()
return sorted(a__ , key=lambda a__ : version.Version(a__ ) )
def __lowerCAmelCase ( ) -> List[Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(a__ )
os.makedirs(a__ , exist_ok=a__ )
__a = Path(a__ ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
init_hf_modules()
__a = Path(a__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(a__ , exist_ok=a__ )
__a = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
__a = f.read()
# Imports of the form `import .xxx`
__a = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , a__ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , a__ , flags=re.MULTILINE )
# Unique-ify
return list(set(a__ ) )
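# Worked example for the two patterns above: a module file containing the lines
#     import .schedulers
#     from .unet import UNet2DModel
# yields ['''schedulers''', '''unet'''] (order not guaranteed after set()).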
def __lowerCAmelCase ( a__ ) -> Any:
__a = False
__a = [module_file]
__a = []
# Let's recurse through all relative imports
while not no_change:
__a = []
for f in files_to_check:
new_imports.extend(get_relative_imports(a__ ) )
__a = Path(a__ ).parent
__a = [str(module_path / m ) for m in new_imports]
__a = [f for f in new_import_files if f not in all_relative_imports]
__a = [F"""{f}.py""" for f in new_import_files]
__a = len(a__ ) == 0
all_relative_imports.extend(a__ )
return all_relative_imports
def __lowerCAmelCase ( a__ ) -> str:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
__a = f.read()
# Imports of the form `import xxx`
__a = re.findall(r'''^\s*import\s+(\S+)\s*$''' , a__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , a__ , flags=re.MULTILINE )
# Only keep the top-level module
__a = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
__a = list(set(a__ ) )
__a = []
for imp in imports:
try:
importlib.import_module(a__ )
except ImportError:
missing_packages.append(a__ )
if len(a__ ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F"""{', '.join(a__ )}. Run `pip install {' '.join(a__ )}`""" )
return get_relative_imports(a__ )
def __lowerCAmelCase ( a__ , a__ ) -> Dict:
__a = module_path.replace(os.path.sep , '''.''' )
__a = importlib.import_module(a__ )
if class_name is None:
return find_pipeline_class(a__ )
return getattr(a__ , a__ )
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
from ..pipelines import DiffusionPipeline
__a = dict(inspect.getmembers(a__ , inspect.isclass ) )
__a = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , a__ )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
F""" {loaded_module}.""" )
__a = cls
return pipeline_class
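# Hedged example: for a community module defining
#     class MyPipeline(DiffusionPipeline): ...
# the scan above returns MyPipeline; it raises ValueError if the module
# defines two such subclasses and returns None if it defines none.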
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , ) -> Tuple:
__a = str(a__ )
__a = os.path.join(a__ , a__ )
if os.path.isfile(a__ ):
__a = module_file_or_url
__a = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
__a = get_diffusers_versions()
# cut ".dev0"
__a = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
__a = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
__a = F"""v{revision}"""
elif revision == "main":
__a = revision
else:
raise ValueError(
F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
F""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
__a = COMMUNITY_PIPELINES_URL.format(revision=a__ , pipeline=a__ )
try:
__a = cached_download(
a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = '''git'''
__a = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
__a = hf_hub_download(
a__ , a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
__a = check_imports(a__ )
# Now we move the module inside our cached dynamic modules.
__a = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(a__ )
__a = Path(a__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a__ , submodule_path / module_file )
for module_needed in modules_needed:
__a = F"""{module_needed}.py"""
shutil.copy(os.path.join(a__ , a__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(a__ , a__ ):
__a = use_auth_token
elif use_auth_token is True:
__a = HfFolder.get_token()
else:
__a = None
__a = model_info(a__ , revision=a__ , token=a__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__a = submodule_path / commit_hash
__a = full_submodule + os.path.sep + commit_hash
create_dynamic_module(a__ )
if not (submodule_path / module_file).exists():
shutil.copy(a__ , submodule_path / module_file )
# Make sure we also have every file with relative imports cached locally
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a__ , F"""{module_needed}.py""" , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return os.path.join(a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ) -> Tuple:
__a = get_cached_module_file(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return get_class_in_module(a__ , final_module.replace('''.py''' , '''''' ) )
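# Hedged usage sketch (upstream this last function is named
# get_class_from_dynamic_module; the dump renames it, like every other
# top-level function here, to `__lowerCAmelCase`). Repo id and file name
# are illustrative:
#
# pipeline_cls = __lowerCAmelCase('''some-user/some-pipeline''', '''pipeline.py''')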
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A : str = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['PerceiverFeatureExtractor']
A : int = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A( a ):
snake_case_ = '''table-transformer'''
snake_case_ = ['''past_key_values''']
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=100 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
__a = backbone_config.get('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
# set timm attributes to None
__a , __a , __a = None, None, None
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return 12
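# Hedged note on the export contract declared above: the ONNX exporter feeds
# pixel_values of shape (batch, num_channels, height, width) plus a batched
# pixel_mask, validates outputs with atol 1e-5, uses opset 12 by default,
# and requires torch >= 1.11 (the parsed minimum version).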
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : int = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['MobileViTFeatureExtractor']
A : str = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
|
import functools
def __lowerCAmelCase ( a__ , a__ ) -> int:
__a = len(a__ )
__a = len(a__ )
@functools.cache
def min_distance(a__ , a__ ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
__a = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , a__ ) , 1 + min_distance(a__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
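# Worked examples (classic Levenshtein distances this function computes):
# '''kitten''' -> '''sitting''' costs 3 (two substitutions, one insertion);
# the empty string -> '''abc''' costs 3 (three insertions).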
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : Union[str, Any] = logging.get_logger(__name__)
A : List[str] = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class __A( a , a ):
snake_case_ = '''focalnet'''
def __init__( self , _snake_case=224 , _snake_case=4 , _snake_case=3 , _snake_case=96 , _snake_case=False , _snake_case=[192, 384, 768, 768] , _snake_case=[2, 2, 6, 2] , _snake_case=[2, 2, 2, 2] , _snake_case=[3, 3, 3, 3] , _snake_case="gelu" , _snake_case=4.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=False , _snake_case=1E-4 , _snake_case=False , _snake_case=False , _snake_case=False , _snake_case=0.02 , _snake_case=1E-5 , _snake_case=32 , _snake_case=None , _snake_case=None , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = use_conv_embed
__a = hidden_sizes
__a = depths
__a = focal_levels
__a = focal_windows
__a = hidden_act
__a = mlp_ratio
__a = hidden_dropout_prob
__a = drop_path_rate
__a = use_layerscale
__a = layerscale_value
__a = use_post_layernorm
__a = use_post_layernorm_in_modulation
__a = normalize_modulator
__a = initializer_range
__a = layer_norm_eps
__a = encoder_stride
__a = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names )
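# Hedged note: with the default depths [2, 2, 6, 2] the stage_names computed
# above are ['''stem''', '''stage1''', '''stage2''', '''stage3''', '''stage4'''],
# and out_features/out_indices fall back to the last stage when both are None.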
| 33
|
import logging
from transformers.configuration_utils import PretrainedConfig
A : Union[str, Any] = logging.getLogger(__name__)
class __A( a ):
snake_case_ = '''masked_bert'''
def __init__( self , _snake_case=30_522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="topK" , _snake_case="constant" , _snake_case=0.0 , **_snake_case , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_snake_case , **_snake_case )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
| 33
| 1
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=24 , _snake_case=2 , _snake_case=6 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=None , _snake_case=1_000 , ) -> int:
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = scope
__a = range_bbox
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a = bbox[i, j, 3]
__a = bbox[i, j, 1]
__a = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__a = bbox[i, j, 2]
__a = bbox[i, j, 0]
__a = t
__a = None
if self.use_input_mask:
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Any:
'''simple docstring'''
__a = LiltModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__a = model(_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
__a = model(_snake_case , bbox=_snake_case , token_type_ids=_snake_case )
__a = model(_snake_case , bbox=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[str]:
'''simple docstring'''
__a = self.num_labels
__a = LiltForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
__a = model(
_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[Any]:
'''simple docstring'''
__a = LiltForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
__a = model(
_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __A( a , a , a , unittest.TestCase ):
snake_case_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
return True
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = LiltModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = LiltModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_torch
@slow
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_snake_case )
__a = torch.tensor([[1, 2]] , device=_snake_case )
__a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_snake_case )
# forward pass
with torch.no_grad():
__a = model(input_ids=_snake_case , bbox=_snake_case )
__a = torch.Size([1, 2, 768] )
__a = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_snake_case , )
self.assertEqual(outputs.last_hidden_state.shape , _snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _snake_case , atol=1E-3 ) )
| 33
|
import sys
def __lowerCAmelCase ( a__ ) -> Optional[int]:
__a = len(a__ )
__a = [[0 for x in range(a__ )] for x in range(a__ )]
__a = [[0 for x in range(a__ )] for x in range(a__ )]
for chain_length in range(2 , a__ ):
for a in range(1 , n - chain_length + 1 ):
__a = a + chain_length - 1
__a = sys.maxsize
for c in range(a__ , a__ ):
__a = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__a = cost
__a = c
return matrix, sol
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Any:
if i == j:
print('''A''' + str(a__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optimal_solution(a__ , a__ , optimal_solution[i][j] )
print_optimal_solution(a__ , optimal_solution[i][j] + 1 , a__ )
print(''')''' , end=''' ''' )
def __lowerCAmelCase ( ) -> int:
__a = [30, 35, 15, 5, 10, 20, 25]
__a = len(a__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
__a , __a = matrix_chain_order(a__ )
print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
print_optimal_solution(a__ , 1 , n - 1 )
if __name__ == "__main__":
main()
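# For the sample dimensions [30, 35, 15, 5, 10, 20, 25] (the CLRS 15.2
# example) the minimum is 15125 scalar multiplications, achieved by the
# parenthesization ((A1(A2A3))((A4A5)A6)).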
| 33
| 1
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCAmelCase ( a__ , a__=1 ) -> Any:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def __lowerCAmelCase ( a__ , a__=0 ) -> Any:
__a = []
for old_item in old_list:
__a = old_item.replace('''in_layers.0''' , '''norm1''' )
__a = new_item.replace('''in_layers.2''' , '''conv1''' )
__a = new_item.replace('''out_layers.0''' , '''norm2''' )
__a = new_item.replace('''out_layers.3''' , '''conv2''' )
__a = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
__a = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
__a = shave_segments(a__ , n_shave_prefix_segments=a__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __lowerCAmelCase ( a__ , a__=0 ) -> Tuple:
__a = []
for old_item in old_list:
__a = old_item
__a = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
__a = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
__a = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
__a = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
__a = shave_segments(a__ , n_shave_prefix_segments=a__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __lowerCAmelCase ( a__ , a__ , a__ , a__=None , a__=None , a__=None ) -> str:
assert isinstance(a__ , a__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__a = old_checkpoint[path]
__a = old_tensor.shape[0] // 3
__a = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__a = old_tensor.shape[0] // config['''num_head_channels'''] // 3
__a = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__a , __a , __a = old_tensor.split(channels // num_heads , dim=1 )
__a = query.reshape(a__ )
__a = key.reshape(a__ )
__a = value.reshape(a__ )
for path in paths:
__a = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__a = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
__a = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
__a = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
__a = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__a = old_checkpoint[path['''old''']][:, :, 0]
else:
__a = old_checkpoint[path['''old''']]
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = {}
__a = checkpoint['''time_embed.0.weight''']
__a = checkpoint['''time_embed.0.bias''']
__a = checkpoint['''time_embed.2.weight''']
__a = checkpoint['''time_embed.2.bias''']
__a = checkpoint['''input_blocks.0.0.weight''']
__a = checkpoint['''input_blocks.0.0.bias''']
__a = checkpoint['''out.0.weight''']
__a = checkpoint['''out.0.bias''']
__a = checkpoint['''out.2.weight''']
__a = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
__a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
__a = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(a__ )
}
# Retrieves the keys for the middle blocks only
__a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
__a = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(a__ )
}
# Retrieves the keys for the output blocks only
__a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
__a = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(a__ )
}
for i in range(1 , a__ ):
__a = (i - 1) // (config['''num_res_blocks'''] + 1)
__a = (i - 1) % (config['''num_res_blocks'''] + 1)
__a = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
__a = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
__a = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
__a = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
__a = renew_resnet_paths(a__ )
__a = {'''old''': F"""input_blocks.{i}.0""", '''new''': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
__a = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path, resnet_op] , config=a__ )
if len(a__ ):
__a = renew_attention_paths(a__ )
__a = {
'''old''': F"""input_blocks.{i}.1""",
'''new''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__a = {
F"""input_blocks.{i}.1.qkv.bias""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path] , attention_paths_to_split=a__ , config=a__ , )
__a = middle_blocks[0]
__a = middle_blocks[1]
__a = middle_blocks[2]
__a = renew_resnet_paths(a__ )
assign_to_checkpoint(a__ , a__ , a__ , config=a__ )
__a = renew_resnet_paths(a__ )
assign_to_checkpoint(a__ , a__ , a__ , config=a__ )
__a = renew_attention_paths(a__ )
__a = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
a__ , a__ , a__ , attention_paths_to_split=a__ , config=a__ )
for i in range(a__ ):
__a = i // (config['''num_res_blocks'''] + 1)
__a = i % (config['''num_res_blocks'''] + 1)
__a = [shave_segments(a__ , 2 ) for name in output_blocks[i]]
__a = {}
for layer in output_block_layers:
__a , __a = layer.split('''.''' )[0], shave_segments(a__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(a__ )
else:
__a = [layer_name]
if len(a__ ) > 1:
__a = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
__a = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
__a = renew_resnet_paths(a__ )
__a = renew_resnet_paths(a__ )
__a = {'''old''': F"""output_blocks.{i}.0""", '''new''': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(a__ , a__ , a__ , additional_replacements=[meta_path] , config=a__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__a = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
__a = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
__a = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(a__ ) == 2:
__a = []
if len(a__ ):
__a = renew_attention_paths(a__ )
__a = {
'''old''': F"""output_blocks.{i}.1""",
'''new''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__a = {
F"""output_blocks.{i}.1.qkv.bias""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=a__ , )
else:
__a = renew_resnet_paths(a__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__a = '''.'''.join(['''output_blocks''', str(a__ ), path['''old''']] )
__a = '''.'''.join(['''up_blocks''', str(a__ ), '''resnets''', str(a__ ), path['''new''']] )
__a = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
A : str = parser.parse_args()
A : int = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A : int = json.loads(f.read())
A : int = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A : str = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A : Optional[Any] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
A : Any = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
A : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
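# Hedged CLI sketch (script name and paths are illustrative; the flags match
# the argparse definitions above):
#
# python convert_ldm_checkpoint.py \
#     --checkpoint_path ./ldm/model.ckpt \
#     --config_file ./ldm/config.json \
#     --dump_path ./converted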
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : int = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['MobileViTFeatureExtractor']
A : str = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A : List[Any] = logging.get_logger(__name__)
A : Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Dict:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
__a = hf_model.adapter
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(a__ , a__ , a__ , a__ )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
__a = '''weight'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Any:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> str:
__a = full_name.split('''adaptor.''' )[-1]
__a = name.split('''.''' )
if items[1].isdigit():
__a = int(items[1] )
else:
__a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__a = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
__a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__a = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__a = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(a__ , a__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__a = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__a = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
__a , __a = emb.weight.shape
__a = nn.Linear(a__ , a__ , bias=a__ )
__a = emb.weight.data
return lin_layer
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Union[str, Any]:
__a = WavaVecaConfig.from_pretrained(
a__ , add_adapter=a__ , adapter_stride=a__ , adapter_kernel_size=a__ , use_auth_token=a__ , output_hidden_size=a__ , )
__a = MBartConfig.from_pretrained(a__ )
# load model
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
__a = model[0].eval()
# load feature extractor
__a = WavaVecaFeatureExtractor.from_pretrained(a__ , use_auth_token=a__ )
# set weights for wav2vec2 encoder
__a = WavaVecaModel(a__ )
recursively_load_weights_wavaveca(model.encoder , a__ )
# load decoder weights
__a = MBartForCausalLM(a__ )
__a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a__ )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__a = SpeechEncoderDecoderModel(encoder=a__ , decoder=a__ )
__a = False
__a = MBartaaTokenizer(a__ )
tokenizer.save_pretrained(a__ )
__a = hf_wavavec.config.to_dict()
__a = tokenizer.pad_token_id
__a = tokenizer.bos_token_id
__a = tokenizer.eos_token_id
__a = '''mbart50'''
__a = '''wav2vec2'''
__a = tokenizer.eos_token_id
__a = 25_0004
__a = tokenizer.eos_token_id
__a = SpeechEncoderDecoderConfig.from_dict(a__ )
hf_wavavec.save_pretrained(a__ )
feature_extractor.save_pretrained(a__ )
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_0_2_4, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=2_5_0_0_0_4, type=int, help='`decoder_start_token_id` of model config')
A : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 33
|
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
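# getAffineTransform builds the 2x3 matrix that maps one point triple onto the other; warpAffine applies it to the image.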
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> np.ndarray:
__a = cva.getAffineTransform(a__ , a__ )
return cva.warpAffine(a__ , a__ , (rows, cols) )
if __name__ == "__main__":
# read original image
A : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
    # convert the image to grayscale
A : Any = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A , A : List[Any] = gray_img.shape
    # define source/destination point triples for the affine rotations
A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
A : Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
A : Tuple = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
A : Tuple = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
    # collect the original and rotated images in a list
A : Tuple = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A : Union[str, Any] = plt.figure(1)
A : str = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 33
| 1
|
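# Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b) until b is 0.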
def __lowerCAmelCase ( a__ , a__ ) -> int:
while b:
__a , __a = b, a % b
return a
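# Recursive form of the same algorithm: gcd(a, b) = gcd(b, a % b), with gcd(a, 0) = a.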
def __lowerCAmelCase ( a__ , a__ ) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def __lowerCAmelCase ( ) -> List[Any]:
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 33
|
from __future__ import annotations
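# Dynamic-programming "all construct": table[i] stores every sequence of word_bank words that concatenates to target[:i].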
def __lowerCAmelCase ( a__ , a__ = None ) -> list[list[str]]:
__a = word_bank or []
# create a table
__a = len(a__ ) + 1
__a = []
for _ in range(a__ ):
table.append([] )
# seed value
__a = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(a__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a__ )] == word:
__a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
                # now, push that combination to table[i + len(word)]
table[i + len(a__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a__ )]:
combination.reverse()
return table[len(a__ )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 33
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
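# Converts a Mesh-TensorFlow GPTSAN checkpoint to PyTorch: each TF variable is renamed to the
# model.* layout (transposing kernels where needed) and the result is saved with torch.save.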
def __lowerCAmelCase ( a__ ) -> Union[str, Any]:
__a = os.path.join(args.tf_model_dir , '''parameters.json''' )
__a = json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
__a = args.output + '''.pt'''
__a = OrderedDict()
with tf.device('''/CPU:0''' ):
__a = tf.train.load_checkpoint(args.tf_model_dir )
__a = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
__a = reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
__a = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
__a = 8
            __a = '''model.sqout.%d.weight''' % (player * 2) # fed into nn.Sequential with Tanh, so 2 at a time
__a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name.startswith('''model/moe''' ):
__a = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
__a = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
__a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name.endswith('''/softmlp/kernel''' ):
__a = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
__a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
__a = key_name[-9:-7]
for i in range(16 ):
__a = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
__a = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
__a = torch.tensor(a__ )
elif key_name.startswith('''model/mlp''' ):
__a = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
__a = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
__a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name.endswith('''/p1/bias''' ):
__a = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
elif key_name.endswith('''/p2/kernel''' ):
__a = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
__a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name.endswith('''/p2/bias''' ):
__a = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
elif key_name.startswith('''model/ln''' ):
__a = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
__a = '''model.blocks.%d.feed_forward.norm.bias''' % player
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
elif key_name.endswith('''/g''' ):
__a = '''model.blocks.%d.feed_forward.norm.weight''' % player
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
elif key_name.startswith('''model/att''' ):
__a = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
__a = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
__a = state[:, 0, :, :]
__a = state[:, 1, :, :]
__a = state[:, 2, :, :]
__a = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__a = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__a = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__a = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
__a = torch.tensor(a__ )
__a = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
__a = torch.tensor(a__ )
__a = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
__a = torch.tensor(a__ )
elif key_name.endswith('''/o/kernel''' ):
__a = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
__a = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name.startswith('''model/an''' ):
__a = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
__a = '''model.blocks.%d.self_attn.norm.bias''' % player
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
elif key_name.endswith('''/g''' ):
__a = '''model.blocks.%d.self_attn.norm.weight''' % player
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
__a = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
__a = '''model.%s.weight''' % nlayer
__a = vnp.copy() # same in embedded
__a = torch.tensor(a__ )
if key_name.startswith('''model/wte''' ):
__a = '''lm_head.weight'''
__a = vnp.copy() # same in embedded
__a = torch.tensor(a__ )
elif key_name.startswith('''model/wob''' ):
__a = '''final_logits_bias'''
__a = vnp.copy() # same in embedded
__a = state.reshape((1, -1) )
__a = torch.tensor(a__ )
elif key_name == "model/dense/kernel":
__a = '''model.last_project.weight'''
__a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__a = torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
__a = '''model.last_project.bias'''
__a = vnp.copy() # same because it is one dimensional
__a = torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
A : str = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 33
|
from typing import List
from .keymap import KEYMAP, get_character
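# The decorators below tag a function with a 'handle_key' attribute so that the KeyHandler
# metaclass can dispatch the matching key presses to it.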
def __lowerCAmelCase ( a__ ) -> List[str]:
def decorator(a__ ):
__a = getattr(a__ , '''handle_key''' , [] )
handle += [key]
setattr(a__ , '''handle_key''' , a__ )
return func
return decorator
def __lowerCAmelCase ( *a__ ) -> str:
def decorator(a__ ):
__a = getattr(a__ , '''handle_key''' , [] )
handle += keys
setattr(a__ , '''handle_key''' , a__ )
return func
return decorator
class __A( a ):
def __new__( cls , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = super().__new__(cls , _snake_case , _snake_case , _snake_case )
if not hasattr(_snake_case , '''key_handler''' ):
setattr(_snake_case , '''key_handler''' , {} )
setattr(_snake_case , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
__a = getattr(_snake_case , '''handle_key''' , [] )
for key in handled_keys:
__a = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> List[str]:
'''simple docstring'''
__a = get_character()
if char != KEYMAP["undefined"]:
__a = ord(_snake_case )
__a = cls.key_handler.get(_snake_case )
if handler:
__a = char
return handler(cls )
else:
return None
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 33
| 1
|
from math import pow, sqrt
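# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# Each helper below solves that relation for one unknown and returns a ValueError instance on invalid input.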
def __lowerCAmelCase ( *a__ ) -> bool:
__a = len(a__ ) > 0 and all(value > 0.0 for value in values )
return result
def __lowerCAmelCase ( a__ , a__ ) -> float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(a__ , a__ )
        else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(a__ , a__ , a__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(a__ , a__ , a__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(a__ , a__ , a__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def __lowerCAmelCase ( a__ , a__ , a__ ) -> float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(a__ , a__ , a__ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
from functools import lru_cache
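# Distinct prime factors by trial division: divide out each factor i while i * i <= n, then add any leftover prime.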
def __lowerCAmelCase ( a__ ) -> set:
__a = 2
__a = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(a__ )
if n > 1:
factors.add(a__ )
return factors
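# upf_len: cached count of the distinct prime factors of n.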
@lru_cache
def __lowerCAmelCase ( a__ ) -> int:
return len(unique_prime_factors(a__ ) )
def __lowerCAmelCase ( a__ ) -> bool:
return len(set(a__ ) ) in (0, 1)
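# Scan windows of consecutive integers starting at base = 2; the window size is appended
# as the target count, and the search stops when all entries agree.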
def __lowerCAmelCase ( a__ ) -> list:
__a = 2
while True:
        # Build the window of consecutive integers starting at base
        __a = [base + i for i in range(a__ )]
        # Run each window element through our unique_prime_factors function
        # Append our target number to the end.
        __a = [upf_len(x ) for x in group]
        checker.append(a__ )
# If all numbers in the list are equal, return the group variable.
if equality(a__ ):
return group
# Increment our base variable by 1
base += 1
def __lowerCAmelCase ( a__ = 4 ) -> int:
__a = run(a__ )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
| 33
|
A : Optional[Any] = tuple[float, float, float]
A : Union[str, Any] = tuple[float, float, float]
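# Three points are collinear exactly when the cross product of vectors AB and AC is the zero vector.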
def __lowerCAmelCase ( a__ , a__ ) -> Vectorad:
__a = end_pointa[0] - end_pointa[0]
__a = end_pointa[1] - end_pointa[1]
__a = end_pointa[2] - end_pointa[2]
return (x, y, z)
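# Component-wise 3D cross product of ab and ac.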
def __lowerCAmelCase ( a__ , a__ ) -> Vectorad:
__a = ab[1] * ac[2] - ab[2] * ac[1] # *i
__a = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__a = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __lowerCAmelCase ( a__ , a__ ) -> bool:
    return tuple(round(x , a__ ) for x in vector ) == (0, 0, 0)
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 10 ) -> bool:
__a = create_vector(a__ , a__ )
__a = create_vector(a__ , a__ )
return is_zero_vector(get_ad_vectors_cross(a__ , a__ ) , a__ )
| 33
| 1
|
from ..utils import DummyObject, requires_backends
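# Import-time placeholder: any use raises an informative error until the optional backends
# (transformers, torch, note_seq) are installed.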
class __A( metaclass=a ):
snake_case_ = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *_snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 33
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
A : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Dict:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = '''weight'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> int:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> Tuple:
if config_path is not None:
__a = UniSpeechSatConfig.from_pretrained(a__ )
else:
__a = UniSpeechSatConfig()
__a = ''''''
if is_finetuned:
__a = UniSpeechSatForCTC(a__ )
else:
__a = UniSpeechSatForPreTraining(a__ )
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__a = model[0].eval()
recursively_load_weights(a__ , a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 33
| 1
|
import numpy
class __A:
def __init__( self , _snake_case , _snake_case ) -> None:
'''simple docstring'''
__a = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
__a = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__a = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__a = numpy.random.rand(3 , 1 )
# Real output values provided.
__a = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__a = numpy.zeros(output_array.shape )
def SCREAMING_SNAKE_CASE_ ( self ) -> numpy.ndarray:
'''simple docstring'''
__a = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__a = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__a = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def SCREAMING_SNAKE_CASE_ ( self ) -> None:
'''simple docstring'''
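        # Hand-rolled backpropagation: each weight update is the chain-rule product of the
        # output error, sigmoid derivatives, and the transposed downstream weights.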
__a = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__a = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__a = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
__a = self.feedforward()
self.back_propagation()
if give_loss:
__a = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = input_arr
__a = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__a = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__a = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
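# Element-wise logistic sigmoid activation.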
def __lowerCAmelCase ( a__ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
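# Derivative of the sigmoid expressed in terms of its output: s * (1 - s).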
def __lowerCAmelCase ( a__ ) -> numpy.ndarray:
return (value) * (1 - (value))
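# Demo: train the network on the 3-bit parity (XOR) truth table, then predict the class for input (1, 1, 1).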
def __lowerCAmelCase ( ) -> int:
__a = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__a = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__a = TwoHiddenLayerNeuralNetwork(
input_array=a__ , output_array=a__ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a__ , iterations=10 , give_loss=a__ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 33
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A( a ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a = bertabert.config.encoder.vocab_size
__a = tokenizer.sep_token_id
__a = tokenizer.cls_token_id
__a = 128
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__a = train_dataset.select(range(32 ) )
__a = val_dataset.select(range(16 ) )
__a = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__a = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
__a = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
__a = inputs.input_ids
__a = inputs.attention_mask
__a = outputs.input_ids
__a = outputs.input_ids.copy()
__a = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__a = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
__a = pred.label_ids
__a = pred.predictions
# all unnecessary tokens are removed
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
__a = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__a = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__a = self.get_auto_remove_tmp_dir()
__a = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__a = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 33
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : Union[str, Any] = logging.get_logger(__name__)
A : List[str] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __A( a ):
snake_case_ = '''deta'''
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=None , _snake_case=900 , _snake_case=2_048 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=1_024 , _snake_case=8 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=True , _snake_case=False , _snake_case="sine" , _snake_case=5 , _snake_case=4 , _snake_case=4 , _snake_case=True , _snake_case=300 , _snake_case=True , _snake_case=True , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , _snake_case=0.25 , **_snake_case , ) -> str:
'''simple docstring'''
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_snake_case , _snake_case ):
__a = backbone_config.pop('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
__a = backbone_config
__a = num_queries
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = auxiliary_loss
__a = position_embedding_type
# deformable attributes
__a = num_feature_levels
__a = encoder_n_points
__a = decoder_n_points
__a = two_stage
__a = two_stage_num_proposals
__a = with_box_refine
__a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
__a = focal_alpha
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
| 33
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : str = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
from math import ceil, sqrt
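# Counts hollow square laminae that use at most limit tiles (cf. Project Euler 173): a lamina with
# outer width w and hole width h uses w**2 - h**2 tiles, and h must share the parity of w.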
def __lowerCAmelCase ( a__ = 100_0000 ) -> int:
__a = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
__a = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
__a = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F"{solution() = }")
| 33
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __A( a ):
snake_case_ = 0
snake_case_ = False
snake_case_ = 3.0
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_snake_case ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _snake_case )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_snake_case , env=os.environ.copy() )
if __name__ == "__main__":
A : List[str] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
A : Optional[Any] = Accelerator(kwargs_handlers=[ddp_scaler])
A : int = torch.nn.Linear(1_0_0, 2_0_0)
A : Optional[int] = accelerator.prepare(model)
# Check the values changed in kwargs
A : List[Any] = ''
A : Tuple = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 33
| 1
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __lowerCAmelCase ( a__ , a__ ) -> Dict:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = torch.permute(a__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a__ ):
# linear layer
__a = flax_key_tuple[:-1] + ('''weight''',)
__a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__a = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
if "metadata" in layer:
__a = layer.split('''metadata''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__a = layer.split('''kvstore''' )
__a = ''''''.join(split_layer[0] )[:-1]
__a = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__a = layer.split('''/''' )
__a = '''/'''.join(split_layer[:-1] )
__a = (split_layer[-1],)
if "kvstore/path" in layer:
__a = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
__a = '''file'''
else:
__a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __lowerCAmelCase ( a__ , a__ ) -> Optional[int]:
__a = rename_keys(a__ )
__a = {}
for k, v in current_block.items():
__a = v
__a = new_current_block
torch.save(a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ = WEIGHTS_NAME ) -> List[Any]:
__a = convert_file_size_to_int(a__ )
__a = []
__a = {}
__a = 0
__a = 0
os.makedirs(a__ , exist_ok=a__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__a = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__a = flatten_dict(a__ , sep='''/''' )
__a = {}
for layer in checkpoint_info.keys():
__a , __a , __a = get_key_and_tensorstore_dict(
a__ , a__ , a__ )
if curr_real_layer_name in all_layers:
__a = content
else:
__a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__a = torch.tensor(a__ )
__a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__a , __a = rename_base_flax_keys(tuple(key.split('''/''' ) ) , a__ )
__a = '''/'''.join(a__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__a = os.path.join(
a__ , weights_name.replace('''.bin''' , F"""-{len(a__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(a__ , a__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__a = {}
__a = 0
__a = raw_weights.to(getattr(a__ , a__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__a = os.path.join(a__ , weights_name.replace('''.bin''' , F"""-{len(a__ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(a__ , a__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__a = {}
__a = {}
for idx, shard in enumerate(a__ ):
__a = weights_name.replace(
'''.bin''' , F"""-{idx+1:05d}-of-{len(a__ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
__a = os.path.join(a__ , weights_name.replace('''.bin''' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(a__ , os.path.join(a__ , a__ ) )
__a = shard
for key in shard:
__a = shard_file
# Add the metadata
__a = {'''total_size''': total_size}
__a = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(a__ , a__ ) , '''w''' , encoding='''utf-8''' ) as f:
__a = json.dumps(a__ , indent=2 , sort_keys=a__ ) + '''\n'''
f.write(a__ )
return metadata, index
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A : Optional[Any] = parser.parse_args()
shard_on_the_fly(
    args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __lowerCAmelCase ( ) -> Optional[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__a = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__a = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__a = TaTokenizer.from_pretrained('''t5-small''' )
__a = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__a = tokenizer(a__ , return_tensors='''pt''' ).input_ids
__a = model.generate(a__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 33
|
import os
# Precomputes a list of the first 100 triangular numbers
A : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
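# A word's alphabetical value is the sum of its letter positions (ord(letter) - 64 for uppercase);
# the solution counts the words whose value is a triangular number.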
def __lowerCAmelCase ( ) -> Tuple:
    __a = os.path.dirname(os.path.realpath(__file__ ) )
__a = os.path.join(a__ , '''words.txt''' )
__a = ''''''
with open(a__ ) as f:
__a = f.readline()
__a = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
__a = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(a__ )
if __name__ == "__main__":
print(solution())
| 33
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __A( a , a , unittest.TestCase ):
snake_case_ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=False ) -> Optional[int]:
'''simple docstring'''
__a = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class in get_values(_snake_case ):
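                # models in the pretraining mapping also expect a dummy label tensor of zeros here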
                __a = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class __A( a ):
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ) -> Dict:
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
__a = embedding_size
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[str]:
'''simple docstring'''
__a = TFMobileBertModel(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
__a = [input_ids, input_mask]
__a = model(_snake_case )
__a = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = TFMobileBertForMaskedLM(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = TFMobileBertForNextSentencePrediction(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = TFMobileBertForPreTraining(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
'''simple docstring'''
__a = self.num_labels
__a = TFMobileBertForSequenceClassification(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
'''simple docstring'''
__a = self.num_choices
__a = TFMobileBertForMultipleChoice(config=_snake_case )
__a = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(_snake_case , 1 ) , (1, self.num_choices, 1) )
__a = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[str]:
'''simple docstring'''
__a = self.num_labels
__a = TFMobileBertForTokenClassification(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[str]:
'''simple docstring'''
__a = TFMobileBertForQuestionAnswering(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
        __a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = TFMobileBertModelTest.TFMobileBertModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
__a = TFMobileBertModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_tf
class __A( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_snake_case )[0]
__a = [1, 6, 30_522]
self.assertEqual(output.shape , _snake_case )
__a = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _snake_case , atol=1E-4 )
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A : str = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['PerceiverFeatureExtractor']
A : int = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
from __future__ import annotations
import bisect
def __lowerCAmelCase ( a__ , a__ , a__ = 0 , a__ = -1 ) -> int:
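    # finds the first index where the item could be inserted while keeping order;
    # e.g. (illustrative) with [0, 5, 7, 10, 15] and item 6 this returns 2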
if hi < 0:
__a = len(a__ )
while lo < hi:
__a = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__a = mid + 1
else:
__a = mid
return lo
def __lowerCAmelCase ( a__ , a__ , a__ = 0 , a__ = -1 ) -> int:
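    # like the left variant but returns the index one past any equal items;
    # e.g. (illustrative) with [0, 5, 7, 10, 15] and item 5 this returns 2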
if hi < 0:
__a = len(a__ )
while lo < hi:
__a = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__a = mid + 1
else:
__a = mid
return lo
def __lowerCAmelCase ( a__ , a__ , a__ = 0 , a__ = -1 ) -> None:
sorted_collection.insert(bisect_left(a__ , a__ , a__ , a__ ) , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ = 0 , a__ = -1 ) -> None:
sorted_collection.insert(bisect_right(a__ , a__ , a__ , a__ ) , a__ )
def __lowerCAmelCase ( a__ , a__ ) -> int | None:
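    # e.g. (illustrative) searching [0, 5, 7, 10, 15] for 10 returns index 3; a missing item returns None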
__a = 0
__a = len(a__ ) - 1
while left <= right:
__a = left + (right - left) // 2
__a = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__a = midpoint - 1
else:
__a = midpoint + 1
return None
def __lowerCAmelCase ( a__ , a__ ) -> int | None:
__a = bisect.bisect_left(a__ , a__ )
if index != len(a__ ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> int | None:
if right < left:
return None
__a = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(a__ , a__ , a__ , midpoint - 1 )
else:
return binary_search_by_recursion(a__ , a__ , midpoint + 1 , a__ )
if __name__ == "__main__":
A : List[str] = input('Enter numbers separated by comma:\n').strip()
A : Optional[Any] = sorted(int(item) for item in user_input.split(','))
A : str = int(input('Enter a single number to be found in the list:\n'))
A : str = binary_search(collection, target)
if result is None:
print(F"{target} was not found in {collection}.")
else:
print(F"{target} was found at position {result} in {collection}.")
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __A:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=10 , _snake_case=3 , _snake_case=2 , _snake_case=2 , _snake_case=2 , _snake_case=True , _snake_case=True , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=10 , _snake_case=0.02 , _snake_case=0.9 , _snake_case=None , ) -> Tuple:
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = patch_size
__a = tubelet_size
__a = num_frames
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = mask_ratio
__a = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__a = (image_size // patch_size) ** 2
__a = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__a = int(mask_ratio * self.seq_length )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
__a = VideoMAEModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__a = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = VideoMAEForPreTraining(_snake_case )
model.to(_snake_case )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__a = torch.ones((self.num_masks,) )
__a = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__a = mask.expand(self.batch_size , -1 ).bool()
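        # e.g. with seq_length 8 and num_masks 3 this yields the boolean row [1, 1, 1, 0, 0, 0, 0, 0] per batch item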
__a = model(_snake_case , _snake_case )
# model only returns predictions for masked patches
__a = mask.sum().item()
__a = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A( a , a , unittest.TestCase ):
snake_case_ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
snake_case_ = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = VideoMAEModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=False ) -> Tuple:
'''simple docstring'''
__a = copy.deepcopy(_snake_case )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__a = torch.ones((self.model_tester.num_masks,) )
__a = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__a = mask.expand(self.model_tester.batch_size , -1 ).bool()
__a = bool_masked_pos.to(_snake_case )
if return_labels:
if model_class in [
*get_values(_snake_case ),
]:
__a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_snake_case )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = VideoMAEModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
if not self.has_attentions:
pass
else:
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
for model_class in self.all_model_classes:
__a = self.model_tester.seq_length - self.model_tester.num_masks
__a = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__a = True
__a = False
__a = True
__a = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__a = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__a = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__a = len(_snake_case )
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
__a = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
__a = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__a = outputs.hidden_states
__a = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_snake_case ) , _snake_case )
__a = self.model_tester.seq_length - self.model_tester.num_masks
__a = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( ) -> int:
__a = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
__a = np.load(a__ )
return list(a__ )
@require_torch
@require_vision
class __A( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
_snake_case )
__a = self.default_image_processor
__a = prepare_video()
__a = image_processor(_snake_case , return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
__a = model(**_snake_case )
# verify the logits
__a = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _snake_case )
__a = torch.tensor([0.3669, -0.0688, -0.2421] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(_snake_case )
__a = self.default_image_processor
__a = prepare_video()
__a = image_processor(_snake_case , return_tensors='''pt''' ).to(_snake_case )
# add boolean mask, indicating which patches to mask
__a = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
__a = torch.load(_snake_case )
# forward pass
with torch.no_grad():
__a = model(**_snake_case )
# verify the logits
__a = torch.Size([1, 1_408, 1_536] )
__a = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=_snake_case )
self.assertEqual(outputs.logits.shape , _snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _snake_case , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__a = torch.tensor([0.5142] , device=_snake_case )
self.assertTrue(torch.allclose(outputs.loss , _snake_case , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=_snake_case ).to(
_snake_case )
with torch.no_grad():
__a = model(**_snake_case )
        __a = torch.tensor([0.6469] , device=_snake_case )
self.assertTrue(torch.allclose(outputs.loss , _snake_case , atol=1E-4 ) )
| 33
|
from string import ascii_uppercase
A : Optional[int] = {char: i for i, char in enumerate(ascii_uppercase)}
A : Union[str, Any] = dict(enumerate(ascii_uppercase))
def __lowerCAmelCase ( a__ , a__ ) -> str:
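    # e.g. (illustrative) the key "SECRET" extended for a 17-character message becomes "SECRETSECRETSECRE"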
__a = len(a__ )
__a = 0
while True:
if x == i:
__a = 0
if len(a__ ) == len(a__ ):
break
key += key[i]
i += 1
return key
def __lowerCAmelCase ( a__ , a__ ) -> str:
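    # note: this scheme encrypts with (message - key) mod 26; the decrypt routine below inverts it with
    # (ciphertext + key) mod 26, the reverse of the textbook Vigenère direction but self-consistent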
__a = ''''''
__a = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__a = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = ''''''
__a = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__a = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __lowerCAmelCase ( ) -> None:
__a = '''THE GERMAN ATTACK'''
__a = '''SECRET'''
__a = generate_key(a__ , a__ )
__a = cipher_text(a__ , a__ )
print(F"""Encrypted Text = {s}""" )
print(F"""Original Text = {original_text(a__ , a__ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
A : str = parser.parse_args()
if args.model_type == "roberta":
A : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
A : Any = 'roberta'
elif args.model_type == "gpt2":
    A : Optional[Any] = GPT2LMHeadModel.from_pretrained(args.model_name)
A : List[str] = 'transformer'
A : Dict = model.state_dict()
A : Any = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
A : Union[str, Any] = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
A : Any = F"{prefix}.embeddings.{w}.weight"
A : Union[str, Any] = state_dict[param_name]
for w in ["weight", "bias"]:
A : List[Any] = F"{prefix}.embeddings.LayerNorm.{w}"
A : List[str] = state_dict[param_name]
# Transformer Blocks #
A : Optional[int] = 0
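    # copy 6 selected teacher blocks into the student, with indices chosen to span the depth of the stack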
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
A : Any = state_dict[
F"{prefix}.h.{teacher_idx}.{layer}.{w}"
]
A : List[str] = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
]
std_idx += 1
    # Language Modeling Head ###
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
A : Optional[int] = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[F"lm_head.dense.{w}"]
A : List[str] = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
A : List[str] = state_dict[F"{prefix}.ln_f.{w}"]
A : Dict = state_dict['lm_head.weight']
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 33
| 1
|
from __future__ import annotations
def __lowerCAmelCase ( a__ , a__ ) -> list[tuple[int, int]]:
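    # enumerate the up to eight knight moves from the given position, keeping only those inside the n x n board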
__a , __a = position
__a = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__a = []
for position in positions:
__a , __a = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(a__ )
return permissible_positions
def __lowerCAmelCase ( a__ ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> bool:
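    # backtracking: squares are numbered 1..n*n in visit order and reset to 0 when no onward move completes the tour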
if is_complete(a__ ):
return True
for position in get_valid_pos(a__ , len(a__ ) ):
__a , __a = position
if board[y][x] == 0:
__a = curr + 1
if open_knight_tour_helper(a__ , a__ , curr + 1 ):
return True
__a = 0
return False
def __lowerCAmelCase ( a__ ) -> list[list[int]]:
__a = [[0 for i in range(a__ )] for j in range(a__ )]
for i in range(a__ ):
for j in range(a__ ):
__a = 1
if open_knight_tour_helper(a__ , (i, j) , 1 ):
return board
__a = 0
__a = F"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
|
import os
import numpy
import onnx
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = a.name
__a = b.name
__a = ''''''
__a = ''''''
__a = a == b
__a = name_a
__a = name_b
return res
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(a__ , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
for n in graph_proto.node:
_node_replace_input_with(a__ , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Union[str, Any]:
__a = list(model.graph.initializer )
__a = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__a = inits[i].name
__a = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , a__ , a__ )
def __lowerCAmelCase ( a__ ) -> str:
__a = os.path.dirname(a__ )
__a = os.path.basename(a__ )
__a = onnx.load(os.path.join(a__ , a__ ) )
__a = list(model.graph.initializer )
__a = set()
__a = {}
__a = []
__a = 0
for i in range(len(a__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(a__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(a__ )
dup_set.add(a__ )
__a = inits[j].data_type
__a = numpy.prod(inits[j].dims )
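                # ONNX TensorProto element types: 1 (FLOAT) and 6 (INT32) take 4 bytes, 7 (INT64) and 11 (DOUBLE) take 8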
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , a__ )
total_reduced_size += mem_size
__a = inits[i].name
__a = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(a__ )
else:
__a = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
__a = sorted(a__ )
_remove_dup_initializers_from_model(a__ , a__ , a__ )
__a = '''optimized_''' + model_file_name
__a = os.path.join(a__ , a__ )
onnx.save(a__ , a__ )
return new_model
| 33
| 1
|
def __lowerCAmelCase ( a__ = 100_0000 ) -> int:
__a = set(range(3 , a__ , 2 ) )
primes.add(2 )
for p in range(3 , a__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , a__ , a__ ) ) )
__a = [float(a__ ) for n in range(limit + 1 )]
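    # apply Euler's product formula: phi(n) = n * prod(1 - 1/p) over the distinct prime factors p of n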
for p in primes:
for n in range(a__ , limit + 1 , a__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 33
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Dict = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
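    # swap this module for a lazy proxy so the heavy submodules above are only imported on first attribute access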
A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __A( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__a = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(_snake_case )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
__a = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(_snake_case )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1E-3 ) )
| 33
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
A : str = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
A : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCAmelCase ( ) -> Tuple:
__a = '''https://pypi.org/pypi/diffusers/json'''
__a = json.loads(request.urlopen(a__ ).read() )['''releases'''].keys()
return sorted(a__ , key=lambda a__ : version.Version(a__ ) )
def __lowerCAmelCase ( ) -> List[Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(a__ )
os.makedirs(a__ , exist_ok=a__ )
__a = Path(a__ ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
init_hf_modules()
__a = Path(a__ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(a__ , exist_ok=a__ )
__a = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
__a = f.read()
# Imports of the form `import .xxx`
__a = re.findall('''^\s*import\s+\.(\S+)\s*$''' , a__ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , a__ , flags=re.MULTILINE )
# Unique-ify
return list(set(a__ ) )
def __lowerCAmelCase ( a__ ) -> Any:
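    # walk relative imports transitively until no new files are discovered, returning every dependent module file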
__a = False
__a = [module_file]
__a = []
# Let's recurse through all relative imports
while not no_change:
__a = []
for f in files_to_check:
new_imports.extend(get_relative_imports(a__ ) )
__a = Path(a__ ).parent
__a = [str(module_path / m ) for m in new_imports]
__a = [f for f in new_import_files if f not in all_relative_imports]
__a = [F"""{f}.py""" for f in new_import_files]
__a = len(a__ ) == 0
all_relative_imports.extend(a__ )
return all_relative_imports
def __lowerCAmelCase ( a__ ) -> str:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
__a = f.read()
# Imports of the form `import xxx`
__a = re.findall('''^\s*import\s+(\S+)\s*$''' , a__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' , a__ , flags=re.MULTILINE )
# Only keep the top-level module
__a = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
__a = list(set(a__ ) )
__a = []
for imp in imports:
try:
importlib.import_module(a__ )
except ImportError:
missing_packages.append(a__ )
if len(a__ ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F"""{', '.join(a__ )}. Run `pip install {' '.join(a__ )}`""" )
return get_relative_imports(a__ )
def __lowerCAmelCase ( a__ , a__ ) -> Dict:
__a = module_path.replace(os.path.sep , '''.''' )
__a = importlib.import_module(a__ )
if class_name is None:
return find_pipeline_class(a__ )
return getattr(a__ , a__ )
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
from ..pipelines import DiffusionPipeline
__a = dict(inspect.getmembers(a__ , inspect.isclass ) )
__a = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , a__ )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
F""" {loaded_module}.""" )
__a = cls
return pipeline_class
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , ) -> Tuple:
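    # resolve the module file from a local path, the diffusers GitHub community pipelines, or the Hugging Face Hub,
    # then copy it (plus its relative imports) into the dynamic modules cache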
__a = str(a__ )
__a = os.path.join(a__ , a__ )
if os.path.isfile(a__ ):
__a = module_file_or_url
__a = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
__a = get_diffusers_versions()
# cut ".dev0"
__a = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
__a = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
__a = F"""v{revision}"""
elif revision == "main":
__a = revision
else:
raise ValueError(
F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
F""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
__a = COMMUNITY_PIPELINES_URL.format(revision=a__ , pipeline=a__ )
try:
__a = cached_download(
a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = '''git'''
__a = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
__a = hf_hub_download(
a__ , a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
__a = check_imports(a__ )
# Now we move the module inside our cached dynamic modules.
__a = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(a__ )
__a = Path(a__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a__ , submodule_path / module_file )
for module_needed in modules_needed:
__a = F"""{module_needed}.py"""
shutil.copy(os.path.join(a__ , a__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(a__ , a__ ):
__a = use_auth_token
elif use_auth_token is True:
__a = HfFolder.get_token()
else:
__a = None
__a = model_info(a__ , revision=a__ , token=a__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__a = submodule_path / commit_hash
__a = full_submodule + os.path.sep + commit_hash
create_dynamic_module(a__ )
if not (submodule_path / module_file).exists():
shutil.copy(a__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a__ , F"""{module_needed}.py""" , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return os.path.join(a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ) -> Tuple:
__a = get_cached_module_file(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return get_class_in_module(a__ , final_module.replace('''.py''' , '''''' ) )
| 33
| 1
|
import numpy as np
from PIL import Image
def __lowerCAmelCase ( a__ , a__ , a__ ) -> np.ndarray:
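    # e.g. (illustrative) maxpooling([[1, 2], [3, 4]], size=2, stride=2) returns [[4.]] as a float array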
__a = np.array(a__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__a = 0
__a = 0
__a = 0
__a = 0
# compute the shape of the output matrix
__a = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__a = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__a = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a = 0
__a = 0
return updated_arr
def __lowerCAmelCase ( a__ , a__ , a__ ) -> np.ndarray:
__a = np.array(a__ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__a = 0
__a = 0
__a = 0
__a = 0
# compute the shape of the output matrix
__a = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__a = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__a = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__a = 0
__a = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
A : Optional[int] = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 33
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A( a ):
snake_case_ = '''table-transformer'''
snake_case_ = ['''past_key_values''']
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=100 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
__a = backbone_config.get('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
# set timm attributes to None
__a , __a , __a = None, None, None
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return 12
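# Added note: the OnnxConfig above declares dynamic axes for `pixel_values`
# (batch, num_channels, height, width) and `pixel_mask` (batch), a validation
# tolerance of 1e-5, and opset 12 as the default ONNX export opset.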
| 33
| 1
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __lowerCAmelCase ( ) -> Dict:
__a = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
__a = Dataset.from_dict(a__ )
return dataset
class __A( a ):
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = get_dataset()
__a = make_duplicate_clusters(_snake_case , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = get_dataset()
__a , __a = deduplicate_dataset(_snake_case )
self.assertEqual(len(_snake_case ) , 2 )
print(_snake_case )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , _snake_case )
| 33
|
import functools
def __lowerCAmelCase ( a__ , a__ ) -> int:
__a = len(a__ )
__a = len(a__ )
@functools.cache
def min_distance(a__ , a__ ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
__a = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , a__ ) , 1 + min_distance(a__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
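# Added usage sketch: the memoised recursion above computes the classic
# Levenshtein edit distance. Assuming the top-level function keeps its
# original name `min_distance_up_bottom` (the definition above is renamed):
# min_distance_up_bottom('kitten', 'sitting') # -> 3 (two substitutions, one insertion)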
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
A : Dict = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
A : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
|
import logging
from transformers.configuration_utils import PretrainedConfig
A : Union[str, Any] = logging.getLogger(__name__)
class __A( a ):
snake_case_ = '''masked_bert'''
def __init__( self , _snake_case=30_522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="topK" , _snake_case="constant" , _snake_case=0.0 , **_snake_case , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_snake_case , **_snake_case )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
| 33
| 1
|
A : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 33
|
import sys
def __lowerCAmelCase ( a__ ) -> Optional[int]:
__a = len(a__ )
__a = [[0 for x in range(a__ )] for x in range(a__ )]
__a = [[0 for x in range(a__ )] for x in range(a__ )]
for chain_length in range(2 , a__ ):
for a in range(1 , n - chain_length + 1 ):
__a = a + chain_length - 1
__a = sys.maxsize
for c in range(a__ , a__ ):
__a = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__a = cost
__a = c
return matrix, sol
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Any:
if i == j:
print('''A''' + str(a__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optimal_solution(a__ , a__ , optimal_solution[i][j] )
print_optimal_solution(a__ , optimal_solution[i][j] + 1 , a__ )
print(''')''' , end=''' ''' )
def __lowerCAmelCase ( ) -> int:
__a = [30, 35, 15, 5, 10, 20, 25]
__a = len(a__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
__a , __a = matrix_chain_order(a__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optimal_solution(a__ , 1 , n - 1 )
if __name__ == "__main__":
main()
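# Added note: matrix_chain_order fills matrix[a][b] with the minimum number of
# scalar multiplications needed for the product A_a ... A_b via the recurrence
# cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b].
# For the dimensions [30, 35, 15, 5, 10, 20, 25] above, the optimum is 15125
# multiplications with parenthesisation ((A1(A2A3))((A4A5)A6)).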
| 33
| 1
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __A:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=False , _snake_case=True , _snake_case="None" , _snake_case=3 , _snake_case=4 , _snake_case=None , ) -> int:
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = relative_attention
__a = position_biased_input
__a = pos_att_type
__a = scope
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
__a = TFDebertaVaModel(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = [input_ids, input_mask]
__a = model(_snake_case )
__a = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = TFDebertaVaForMaskedLM(config=_snake_case )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> int:
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForSequenceClassification(config=_snake_case )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForTokenClassification(config=_snake_case )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = TFDebertaVaForQuestionAnswering(config=_snake_case )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(_snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A( a , a , unittest.TestCase ):
snake_case_ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = TFDebertaVaModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(_snake_case )
@require_tf
class __A( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
__a = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__a = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__a = model(_snake_case , attention_mask=_snake_case )[0]
__a = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 )
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : int = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['MobileViTFeatureExtractor']
A : str = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
import re
from filelock import FileLock
try:
import nltk
A : str = True
except (ImportError, ModuleNotFoundError):
A : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def __lowerCAmelCase ( a__ ) -> str:
__a = re.sub('''<n>''' , '''''' , a__ ) # remove pegasus newline char (the original discarded this result)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
| 33
|
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> np.ndarray:
__a = cva.getAffineTransform(a__ , a__ )
return cva.warpAffine(a__ , a__ , (rows, cols) )
if __name__ == "__main__":
# read original image
A : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A : Any = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A , A : List[Any] = gray_img.shape
# set different points to rotate image
A : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
A : Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
A : Tuple = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
A : Tuple = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
A : Tuple = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A : Union[str, Any] = plt.figure(1)
A : str = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
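# Added note: cva.getAffineTransform solves for the unique 2x3 affine matrix
# that maps the three source points onto the three destination points, and
# cva.warpAffine then applies that matrix to every pixel of the image.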
| 33
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class __A:
def __init__( self , _snake_case ) -> None:
'''simple docstring'''
__a = value
__a = None
__a = None
class __A:
def __init__( self , _snake_case ) -> None:
'''simple docstring'''
__a = tree
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
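# Added note: despite its name, depth_first_search returns the *sum* of the
# node values it visits, so iterating the wrapper yields a single total; for
# example, a root of 10 with children 5 and -3 yields 12.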
| 33
|
from __future__ import annotations
def __lowerCAmelCase ( a__ , a__ = None ) -> list[list[str]]:
__a = word_bank or []
# create a table
__a = len(a__ ) + 1
__a = []
for _ in range(a__ ):
table.append([] )
# seed value
__a = [[]] # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(a__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a__ )] == word:
__a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to the table[i + len(word)]
table[i + len(a__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a__ )]:
combination.reverse()
return table[len(a__ )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
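# Added worked example: table[i] stores every way to build target[:i] from the
# word bank, so all_construct('abc', ['a', 'b', 'c', 'ab']) returns
# [['ab', 'c'], ['a', 'b', 'c']].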
| 33
| 1
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
A : List[Any] = logging.get_logger(__name__)
class __A( a ):
def __init__( self , *_snake_case , **_snake_case ) -> None:
'''simple docstring'''
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 33
|
from typing import List
from .keymap import KEYMAP, get_character
def __lowerCAmelCase ( a__ ) -> List[str]:
def decorator(a__ ):
__a = getattr(a__ , '''handle_key''' , [] )
handle += [key]
setattr(a__ , '''handle_key''' , a__ )
return func
return decorator
def __lowerCAmelCase ( *a__ ) -> str:
def decorator(a__ ):
__a = getattr(a__ , '''handle_key''' , [] )
handle += keys
setattr(a__ , '''handle_key''' , a__ )
return func
return decorator
class __A( a ):
def __new__( cls , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = super().__new__(cls , _snake_case , _snake_case , _snake_case )
if not hasattr(_snake_case , '''key_handler''' ):
setattr(_snake_case , '''key_handler''' , {} )
setattr(_snake_case , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
__a = getattr(_snake_case , '''handle_key''' , [] )
for key in handled_keys:
__a = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> List[str]:
'''simple docstring'''
__a = get_character()
if char != KEYMAP["undefined"]:
__a = ord(_snake_case )
__a = cls.key_handler.get(_snake_case )
if handler:
__a = char
return handler(cls )
else:
return None
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
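# Added note (decorator names assumed, since the definitions above are
# renamed): classes built with the KeyHandler metaclass collect every method
# whose `handle_key` list was populated by the decorators, and handle_input
# dispatches a pressed character to the matching method.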
| 33
| 1
|
from __future__ import annotations
def __lowerCAmelCase ( a__ , a__ = None ) -> list[list[str]]:
__a = word_bank or []
# create a table
__a = len(a__ ) + 1
__a = []
for _ in range(a__ ):
table.append([] )
# seed value
__a = [[]] # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(a__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(a__ )] == word:
__a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to the table[i + len(word)]
table[i + len(a__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(a__ )]:
combination.reverse()
return table[len(a__ )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase ( a__ ) -> Optional[int]:
random.seed(a__ )
np.random.seed(a__ )
torch.manual_seed(a__ )
torch.cuda.manual_seed_all(a__ )
# ^^ safe to call this function even if cuda is not available
class __A:
def __init__( self , _snake_case , _snake_case = 0.9999 , _snake_case = 0.0 , _snake_case = 0 , _snake_case = False , _snake_case = 1.0 , _snake_case = 2 / 3 , _snake_case = None , _snake_case = None , **_snake_case , ) -> Tuple:
'''simple docstring'''
if isinstance(_snake_case , torch.nn.Module ):
__a = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , _snake_case , standard_warn=_snake_case , )
__a = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__a = True
if kwargs.get('''max_value''' , _snake_case ) is not None:
__a = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , _snake_case , standard_warn=_snake_case )
__a = kwargs['''max_value''']
if kwargs.get('''min_value''' , _snake_case ) is not None:
__a = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , _snake_case , standard_warn=_snake_case )
__a = kwargs['''min_value''']
__a = list(_snake_case )
__a = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , _snake_case ) is not None:
__a = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , _snake_case , standard_warn=_snake_case )
self.to(device=kwargs['''device'''] )
__a = None
__a = decay
__a = min_decay
__a = update_after_step
__a = use_ema_warmup
__a = inv_gamma
__a = power
__a = 0
__a = None # set in `step()`
__a = model_cls
__a = model_config
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case ) -> "EMAModel":
'''simple docstring'''
__a , __a = model_cls.load_config(_snake_case , return_unused_kwargs=_snake_case )
__a = model_cls.from_pretrained(_snake_case )
__a = cls(model.parameters() , model_cls=_snake_case , model_config=model.config )
ema_model.load_state_dict(_snake_case )
return ema_model
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
__a = self.model_cls.from_config(self.model_config )
__a = self.state_dict()
state_dict.pop('''shadow_params''' , _snake_case )
model.register_to_config(**_snake_case )
self.copy_to(model.parameters() )
model.save_pretrained(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> float:
'''simple docstring'''
__a = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__a = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__a = (1 + step) / (10 + step)
__a = min(_snake_case , self.decay )
# make sure decay is not smaller than min_decay
__a = max(_snake_case , self.min_decay )
return cur_decay_value
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
'''simple docstring'''
if isinstance(_snake_case , torch.nn.Module ):
__a = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , _snake_case , standard_warn=_snake_case , )
__a = parameters.parameters()
__a = list(_snake_case )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__a = self.get_decay(self.optimization_step )
__a = decay
__a = 1 - decay
__a = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _snake_case ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__a = deepspeed.zero.GatheredParameters(_snake_case , modifier_rank=_snake_case )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None:
'''simple docstring'''
__a = list(_snake_case )
for s_param, param in zip(self.shadow_params , _snake_case ):
param.data.copy_(s_param.to(param.device ).data )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None ) -> None:
'''simple docstring'''
__a = [
p.to(device=_snake_case , dtype=_snake_case ) if p.is_floating_point() else p.to(device=_snake_case )
for p in self.shadow_params
]
def SCREAMING_SNAKE_CASE_ ( self ) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None:
'''simple docstring'''
__a = [param.detach().cpu().clone() for param in parameters]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , _snake_case ):
param.data.copy_(c_param.data )
# Better memory-wise.
__a = None
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None:
'''simple docstring'''
__a = copy.deepcopy(_snake_case )
__a = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
__a = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , _snake_case ):
raise ValueError('''Invalid min_decay''' )
__a = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , _snake_case ):
raise ValueError('''Invalid optimization_step''' )
__a = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , _snake_case ):
raise ValueError('''Invalid update_after_step''' )
__a = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _snake_case ):
raise ValueError('''Invalid use_ema_warmup''' )
__a = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
__a = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
__a = state_dict.get('''shadow_params''' , _snake_case )
if shadow_params is not None:
__a = shadow_params
if not isinstance(self.shadow_params , _snake_case ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
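# Added note: with use_ema_warmup enabled, get_decay ramps the decay in as
# 1 - (1 + step / inv_gamma) ** -power, so early steps track the live weights
# closely; otherwise the schedule (1 + step) / (10 + step) is used. Either way
# the result is clamped into [min_decay, decay].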
| 33
|
A : Optional[Any] = tuple[float, float, float]
A : Union[str, Any] = tuple[float, float, float]
def __lowerCAmelCase ( a__ , a__ ) -> Vectorad:
__a = end_pointa[0] - end_pointa[0]
__a = end_pointa[1] - end_pointa[1]
__a = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __lowerCAmelCase ( a__ , a__ ) -> Vectorad:
__a = ab[1] * ac[2] - ab[2] * ac[1] # *i
__a = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__a = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __lowerCAmelCase ( a__ , a__ ) -> bool:
return tuple(round(x , a__ ) for x in vector ) == (0, 0, 0)
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 10 ) -> bool:
__a = create_vector(a__ , a__ )
__a = create_vector(a__ , a__ )
return is_zero_vector(get_ad_vectors_cross(a__ , a__ ) , a__ )
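# Added worked example: three 3-D points are collinear exactly when AB x AC is
# the zero vector. Assuming the top-level check keeps its original name
# `points_are_collinear_3d` (the definition above is renamed):
# points_are_collinear_3d((4, 13, 2), (8, 14, 3), (16, 16, 5)) # -> True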
| 33
| 1
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 33
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
A : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
A : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Dict:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
__a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a = '''weight'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> int:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__a = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a__ )
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__=None , a__=None , a__=True ) -> Tuple:
if config_path is not None:
__a = UniSpeechSatConfig.from_pretrained(a__ )
else:
__a = UniSpeechSatConfig()
__a = ''''''
if is_finetuned:
__a = UniSpeechSatForCTC(a__ )
else:
__a = UniSpeechSatForPreTraining(a__ )
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__a = model[0].eval()
recursively_load_weights(a__ , a__ )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A : Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
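# Added note: the `weight_g`/`weight_v` cases above come from fairseq's
# weight-normalised convolutions (torch.nn.utils.weight_norm factors a weight
# into a magnitude `g` and a direction `v`), so they must be copied separately.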
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A( a ):
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a = bertabert.config.encoder.vocab_size
__a = tokenizer.sep_token_id
__a = tokenizer.cls_token_id
__a = 128
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__a = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__a = train_dataset.select(range(32 ) )
__a = val_dataset.select(range(16 ) )
__a = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__a = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
__a = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
__a = inputs.input_ids
__a = inputs.attention_mask
__a = outputs.input_ids
__a = outputs.input_ids.copy()
__a = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__a = outputs.attention_mask
assert all(len(x ) == 512 for x in inputs.input_ids )
assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
__a = pred.label_ids
__a = pred.predictions
# all unnecessary tokens are removed
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__a = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
__a = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__a = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__a = self.get_auto_remove_tmp_dir()
__a = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__a = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
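# Added note: pad positions in the labels are replaced with -100 above because
# PyTorch's CrossEntropyLoss ignores targets equal to -100, so padding tokens
# do not contribute to the sequence-to-sequence loss.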
| 33
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
A : List[str] = logging.get_logger(__name__)
# General docstring
A : Optional[Any] = 'MobileNetV1Config'
# Base docstring
A : List[Any] = 'google/mobilenet_v1_1.0_224'
A : Dict = [1, 1_0_2_4, 7, 7]
# Image classification docstring
A : Dict = 'google/mobilenet_v1_1.0_224'
A : Tuple = 'tabby, tabby cat'
A : List[Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCAmelCase ( a__ , a__ , a__=None ) -> str:
__a = {}
if isinstance(a__ , a__ ):
__a = model.mobilenet_va
else:
__a = model
__a = '''MobilenetV1/Conv2d_0/'''
__a = backbone.conv_stem.convolution.weight
__a = backbone.conv_stem.normalization.bias
__a = backbone.conv_stem.normalization.weight
__a = backbone.conv_stem.normalization.running_mean
__a = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a = i + 1
__a = i * 2
__a = backbone.layer[pt_index]
__a = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
__a = pointer.convolution.weight
__a = pointer.normalization.bias
__a = pointer.normalization.weight
__a = pointer.normalization.running_mean
__a = pointer.normalization.running_var
__a = backbone.layer[pt_index + 1]
__a = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
__a = pointer.convolution.weight
__a = pointer.normalization.bias
__a = pointer.normalization.weight
__a = pointer.normalization.running_mean
__a = pointer.normalization.running_var
if isinstance(a__ , a__ ):
__a = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
__a = model.classifier.weight
__a = model.classifier.bias
return tf_to_pt_map
def __lowerCAmelCase ( a__ , a__ , a__ ) -> List[str]:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
__a = tf.train.list_variables(a__ )
__a = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
__a = tf.train.load_variable(a__ , a__ )
__a = array
# Build TF to PyTorch weights loading map
__a = _build_tf_to_pytorch_map(a__ , a__ , a__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
__a = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
__a = np.transpose(a__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
__a = array.squeeze().transpose()
else:
__a = np.transpose(a__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
__a = torch.from_numpy(a__ )
tf_weights.pop(a__ , a__ )
tf_weights.pop(name + '''/RMSProp''' , a__ )
tf_weights.pop(name + '''/RMSProp_1''' , a__ )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , a__ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def __lowerCAmelCase ( a__ , a__ ) -> torch.Tensor:
__a , __a = features.shape[-2:]
__a , __a = conv_layer.stride
__a , __a = conv_layer.kernel_size
if in_height % stride_height == 0:
__a = max(kernel_height - stride_height , 0 )
else:
__a = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a = max(kernel_width - stride_width , 0 )
else:
__a = max(kernel_width - (in_width % stride_width) , 0 )
__a = pad_along_width // 2
__a = pad_along_width - pad_left
__a = pad_along_height // 2
__a = pad_along_height - pad_top
__a = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(a__ , a__ , '''constant''' , 0.0 )
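# Added note: apply_tf_padding reproduces TensorFlow's "SAME" convolution
# padding, which pads asymmetrically (the extra pixel goes to the right and
# bottom when the total padding is odd), unlike PyTorch's symmetric padding.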
class __A( nn.Module ):
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = 1 , _snake_case = 1 , _snake_case = False , _snake_case = True , _snake_case = True , ) -> None:
'''simple docstring'''
super().__init__()
__a = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
__a = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a = nn.Convad(
in_channels=_snake_case , out_channels=_snake_case , kernel_size=_snake_case , stride=_snake_case , padding=_snake_case , groups=_snake_case , bias=_snake_case , padding_mode='''zeros''' , )
if use_normalization:
__a = nn.BatchNormad(
num_features=_snake_case , eps=config.layer_norm_eps , momentum=0.9997 , affine=_snake_case , track_running_stats=_snake_case , )
else:
__a = None
if use_activation:
if isinstance(_snake_case , _snake_case ):
__a = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _snake_case ):
__a = ACTaFN[config.hidden_act]
else:
__a = config.hidden_act
else:
__a = None
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> torch.Tensor:
'''simple docstring'''
if self.config.tf_padding:
__a = apply_tf_padding(_snake_case , self.convolution )
__a = self.convolution(_snake_case )
if self.normalization is not None:
__a = self.normalization(_snake_case )
if self.activation is not None:
__a = self.activation(_snake_case )
return features
class __A( a ):
snake_case_ = MobileNetVaConfig
snake_case_ = load_tf_weights_in_mobilenet_va
snake_case_ = '''mobilenet_v1'''
snake_case_ = '''pixel_values'''
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> None:
'''simple docstring'''
if isinstance(_snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_snake_case , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
A : Dict = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
A : Tuple = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , a , )
class __A( a ):
def __init__( self , _snake_case , _snake_case = True ) -> Dict:
'''simple docstring'''
super().__init__(_snake_case )
__a = config
__a = 32
__a = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a = MobileNetVaConvLayer(
_snake_case , in_channels=config.num_channels , out_channels=_snake_case , kernel_size=3 , stride=2 , )
__a = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a = nn.ModuleList()
for i in range(13 ):
__a = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_snake_case , in_channels=_snake_case , out_channels=_snake_case , kernel_size=3 , stride=strides[i] , groups=_snake_case , ) )
self.layer.append(
MobileNetVaConvLayer(
_snake_case , in_channels=_snake_case , out_channels=_snake_case , kernel_size=1 , ) )
__a = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case = None , _snake_case = None , _snake_case = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
'''simple docstring'''
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
__a = self.conv_stem(_snake_case )
__a = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a = layer_module(_snake_case )
if output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = hidden_states
if self.pooler is not None:
__a = torch.flatten(self.pooler(_snake_case ) , start_dim=1 )
else:
__a = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case , pooler_output=_snake_case , hidden_states=_snake_case , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , a , )
class __A( a ):
def __init__( self , _snake_case ) -> None:
'''simple docstring'''
super().__init__(_snake_case )
__a = config.num_labels
__a = MobileNetVaModel(_snake_case )
__a = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a = nn.Dropout(config.classifier_dropout_prob , inplace=_snake_case )
__a = nn.Linear(_snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.mobilenet_va(_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier(self.dropout(_snake_case ) )
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = '''single_label_classification'''
else:
__a = '''multi_label_classification'''
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a = loss_fct(_snake_case , _snake_case )
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(_snake_case , _snake_case )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states , )
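# --- Editor's note: hedged usage sketch, not part of the original model file ---
# The classification head above dispatches between MSE, cross-entropy and
# BCE-with-logits losses based on `config.problem_type`. The self-contained
# sketch below reproduces that dispatch on dummy tensors so each branch can be
# checked in isolation; the helper name and tensor shapes are illustrative
# assumptions, not values taken from the model above.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
def _dispatch_classification_loss(logits, labels, problem_type, num_labels):
    if problem_type == "regression":
        loss_fct = MSELoss()
        if num_labels == 1:
            return loss_fct(logits.squeeze(), labels.squeeze())
        return loss_fct(logits, labels)
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    # multi_label_classification expects float multi-hot label vectors
    return BCEWithLogitsLoss()(logits, labels)
# e.g. _dispatch_classification_loss(torch.randn(4, 3), torch.randint(0, 3, (4,)),
#                                    "single_label_classification", 3)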
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
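# Editor's note (hedged sketch): `kernel_size + abs(kernel_size % 2 - 1)` in
# parse_args above bumps an even kernel size up to the next odd number, so the
# filter window always has a well-defined centre pixel. Standalone check:
def _force_odd(kernel_size: int) -> int:
    return kernel_size + abs(kernel_size % 2 - 1)
assert _force_odd(4) == 5 and _force_odd(5) == 5 and _force_odd(6) == 7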
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __A( KwargsHandler ):
    a: int = 0
    b: bool = False
    c: float = 3.0
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_snake_case , env=os.environ.copy() )
if __name__ == "__main__":
A : List[str] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
A : Optional[Any] = Accelerator(kwargs_handlers=[ddp_scaler])
A : int = torch.nn.Linear(1_0_0, 2_0_0)
A : Optional[int] = accelerator.prepare(model)
# Check the values changed in kwargs
A : List[Any] = ''
A : Tuple = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
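# Editor's note (hedged sketch): `to_kwargs()` on a KwargsHandler returns only
# the fields that differ from the dataclass defaults, which is what the first
# test above asserts. A standalone re-implementation of that idea, using a
# hypothetical dataclass:
from dataclasses import dataclass, fields
@dataclass
class _MockKwargs:
    a: int = 0
    b: bool = False
    c: float = 3.0
    def to_kwargs(self):
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }
assert _MockKwargs(a=2).to_kwargs() == {"a": 2}
assert _MockKwargs(a=2, b=True).to_kwargs() == {"a": 2, "b": True}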
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        result.append(row_first_half + row_second_half)
    return result
def benchmark() -> None:
    """Benchmark both triangle generators for 0..14 rows."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
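# Editor's note (hedged cross-check): every Pascal row is a row of binomial
# coefficients, so `math.comb` gives an independent oracle against which both
# generators above can be verified.
import math
def pascal_row_via_comb(row_index: int) -> list[int]:
    return [math.comb(row_index, k) for k in range(row_index + 1)]
assert pascal_row_via_comb(4) == [1, 4, 6, 4, 1]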
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # Keep only the word scores (sum of letter positions) that are triangular
    word_scores = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(word_scores)
if __name__ == "__main__":
print(solution())
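# Editor's note (hedged alternative): the precomputed table above can be
# avoided by inverting t = n(n + 1) / 2 -- a score s is triangular iff
# 8*s + 1 is a perfect square.
import math
def is_triangular(score: int) -> bool:
    root = math.isqrt(8 * score + 1)
    return root * root == 8 * score + 1
assert is_triangular(55) and not is_triangular(54)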
from string import ascii_uppercase
CHAR_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_CHAR = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter backwards by the matching key letter."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (CHAR_TO_INDEX[letter] - CHAR_TO_INDEX[key_new[i]]) % 26
            i += 1
            encrypted += INDEX_TO_CHAR[x]
    return encrypted
def original_text(cipher: str, key_new: str) -> str:
    """Decrypt: shift each letter forwards by the matching key letter."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (CHAR_TO_INDEX[letter] + CHAR_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_CHAR[x]
    return or_txt
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
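# Editor's note (hedged round-trip check): encryption followed by decryption
# should recover the plaintext for any key, with spaces preserved. Uses the
# three functions defined above.
def _cipher_round_trip(message: str, key: str) -> bool:
    key_new = generate_key(message, key)
    return original_text(cipher_text(message, key_new), key_new) == message
assert _cipher_round_trip("THE GERMAN ATTACK", "SECRET")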
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import contextlib
import os
import sqlite3 as sqlitea  # stdlib sqlite3, aliased to keep the call sites below unchanged
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __lowerCAmelCase ( a__ , a__ ) -> Optional[int]:
assert isinstance(a__ , a__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> str:
__a = tmp_path / '''cache'''
__a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_sql_dataset(a__ , a__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> Union[str, Any]:
__a = tmp_path / '''cache'''
__a = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=a__ , cache_dir=a__ ).read()
_check_sql_dataset(a__ , a__ )
def __lowerCAmelCase ( a__ ) -> int:
with contextlib.closing(sqlitea.connect(a__ ) ) as con:
__a = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def __lowerCAmelCase ( a__ , a__ , a__ ) -> List[Any]:
__a = tmp_path / '''cache'''
__a = os.path.join(a__ , '''tmp.sql''' )
__a = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=a__ ).read()
SqlDatasetWriter(a__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
__a = iter_sql_file(a__ )
__a = iter_sql_file(a__ )
for rowa, rowa in zip(a__ , a__ ):
assert rowa == rowa
@require_sqlalchemy
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
__a = tmp_path / '''cache'''
__a = os.path.join(a__ , '''tmp.sql''' )
__a = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=a__ ).read()
SqlDatasetWriter(a__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
__a = iter_sql_file(a__ )
__a = iter_sql_file(a__ )
for rowa, rowa in zip(a__ , a__ ):
assert rowa == rowa
@require_sqlalchemy
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
__a = tmp_path / '''cache'''
__a = os.path.join(a__ , '''tmp.sql''' )
__a = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=a__ ).read()
with pytest.raises(a__ ):
SqlDatasetWriter(a__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
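# Editor's note (hedged usage sketch): the round-trip the tests above exercise
# boils down to the public `datasets` API below; `sqlite_path` and
# `output_sqlite_path` are hypothetical paths to sqlite files containing a
# table named "dataset".
#
#     from datasets import Dataset
#     ds = Dataset.from_sql("dataset", "sqlite:///" + sqlite_path)
#     ds.to_sql("dataset", "sqlite:///" + output_sqlite_path)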
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A( a , unittest.TestCase ):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
snake_case_ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
snake_case_ = False
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__a = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        __a = UNet2DConditionModel(**_snake_case )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = self.dummy_unet
__a = self.dummy_movq
__a = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__a = DDIMScheduler(**_snake_case )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=0 ) -> Optional[Any]:
'''simple docstring'''
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_snake_case ) ).to(_snake_case )
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_snake_case )
# create init_image
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a = Image.fromarray(np.uint8(_snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith('''mps''' ):
__a = torch.manual_seed(_snake_case )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__a = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = '''cpu'''
__a = self.get_dummy_components()
__a = self.pipeline_class(**_snake_case )
__a = pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=None )
__a = pipe(**self.get_dummy_inputs(_snake_case ) )
__a = output.images
__a = pipe(
**self.get_dummy_inputs(_snake_case ) , return_dict=_snake_case , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__a = init_image.resize((512, 512) )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__a = torch.from_numpy(np.array(_snake_case ) ).float() / 255.0
__a = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__a = '''A robot, 4k photo'''
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_snake_case )
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.float16 )
__a = pipeline.to(_snake_case )
        pipeline.set_progress_bar_config(disable=None )
__a = torch.Generator(device='''cpu''' ).manual_seed(0 )
__a , __a = pipe_prior(
_snake_case , image=_snake_case , strength=0.85 , generator=_snake_case , negative_prompt='''''' , ).to_tuple()
__a = pipeline(
image=_snake_case , image_embeds=_snake_case , negative_image_embeds=_snake_case , hint=_snake_case , generator=_snake_case , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_snake_case , _snake_case )
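# Editor's note (hedged summary): the slow test above follows the standard
# two-stage Kandinsky 2.2 flow -- a prior pipeline maps the prompt (and init
# image) to image embeddings, which the controlnet img2img pipeline then
# decodes conditioned on the depth hint:
#
#     image_emb, zero_image_emb = pipe_prior(prompt, image=init_image,
#                                            strength=0.85).to_tuple()
#     out = pipeline(image=init_image, image_embeds=image_emb,
#                    negative_image_embeds=zero_image_emb, hint=hint)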
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__( self , parent , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = TFEsmModel(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_snake_case )
__a = [input_ids, input_mask]
__a = model(_snake_case )
__a = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a = True
__a = TFEsmModel(config=_snake_case )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
__a = model(_snake_case )
__a = [input_ids, input_mask]
__a = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
__a = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
'''simple docstring'''
__a = TFEsmForMaskedLM(config=_snake_case )
__a = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Any:
'''simple docstring'''
__a = self.num_labels
__a = TFEsmForTokenClassification(config=_snake_case )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A( a , a , unittest.TestCase ):
snake_case_ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = TFEsmModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__a = model.get_bias()
assert isinstance(_snake_case , _snake_case )
for k, v in name.items():
assert isinstance(_snake_case , tf.Variable )
else:
__a = model.get_output_embeddings()
assert x is None
__a = model.get_bias()
assert name is None
@require_tf
class __A( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_snake_case )[0]
__a = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
__a = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__a = model(_snake_case )[0]
# compare the actual values for a slice.
__a = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
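# Editor's note (hedged usage sketch): the integration tests above reduce to
# this inference pattern (checkpoint name taken from the tests themselves):
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]   # shape (1, 6, 33)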
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = 'transformer'
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[F"{prefix}.{param_name}"] = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = F"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
for w in ["weight", "bias"]:
            param_name = F"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                compressed_sd[F"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                    F"{prefix}.h.{teacher_idx}.{layer}.{w}"
                ]
        compressed_sd[F"{prefix}.h.{std_idx}.attn.bias"] = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                compressed_sd[F"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                    F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                ]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
        compressed_sd[F"{layer}"] = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
            compressed_sd[F"lm_head.dense.{w}"] = state_dict[F"lm_head.dense.{w}"]
            compressed_sd[F"lm_head.layer_norm.{w}"] = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
        compressed_sd[F"{prefix}.ln_f.{w}"] = state_dict[F"{prefix}.ln_f.{w}"]
    compressed_sd['lm_head.weight'] = state_dict['lm_head.weight']
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
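# Editor's note (hedged sketch): the script above keeps 6 of the teacher's 12
# transformer layers (indices 0, 2, 4, 7, 9, 11) and renumbers them 0..5 in
# the student state dict. The standalone helper below illustrates that
# renaming; the key pattern and function name are hypothetical.
def renumber_layers(teacher_keys, kept=(0, 2, 4, 7, 9, 11)):
    mapping = {}
    for student_idx, teacher_idx in enumerate(kept):
        for key in teacher_keys:
            if f".layer.{teacher_idx}." in key:
                mapping[key] = key.replace(f".layer.{teacher_idx}.", f".layer.{student_idx}.")
    return mapping
# e.g. renumber_layers(["roberta.encoder.layer.7.output.dense.weight"])
#      -> {...: "roberta.encoder.layer.3.output.dense.weight"}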
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
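# Editor's note (hedged alternative): the helper above recomputes the whole
# sequence for every index probed, which is quadratic overall. An O(n)
# incremental version:
def fibonacci_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit term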
import os
import numpy
import onnx
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = a.name
__a = b.name
__a = ''''''
__a = ''''''
__a = a == b
__a = name_a
__a = name_b
return res
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(a__ , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
for n in graph_proto.node:
_node_replace_input_with(a__ , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Union[str, Any]:
__a = list(model.graph.initializer )
__a = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__a = inits[i].name
__a = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , a__ , a__ )
def __lowerCAmelCase ( a__ ) -> str:
__a = os.path.dirname(a__ )
__a = os.path.basename(a__ )
__a = onnx.load(os.path.join(a__ , a__ ) )
__a = list(model.graph.initializer )
__a = set()
__a = {}
__a = []
__a = 0
for i in range(len(a__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(a__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(a__ )
dup_set.add(a__ )
__a = inits[j].data_type
__a = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , a__ )
total_reduced_size += mem_size
__a = inits[i].name
__a = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(a__ )
else:
__a = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
__a = sorted(a__ )
_remove_dup_initializers_from_model(a__ , a__ , a__ )
__a = '''optimized_''' + model_file_name
__a = os.path.join(a__ , a__ )
onnx.save(a__ , a__ )
return new_model
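# Editor's note (hedged sketch): the O(n^2) pairwise scan above can be cut to
# a single pass by bucketing initializers on cheap metadata first; only the
# rare bucket collisions then need the exact proto comparison. The helper
# below is an illustrative sketch, not part of the original utility.
from collections import defaultdict
def find_duplicate_candidates(inits):
    buckets = defaultdict(list)
    for idx, init in enumerate(inits):
        buckets[(init.data_type, tuple(init.dims), init.raw_data)].append(idx)
    return [group for group in buckets.values() if len(group) > 1]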
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class __A( BenchmarkArguments ):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs ) -> None:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={'''help''': '''Trace the models using torchscript'''} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
    fp16_opt_level: str = field(
        default='''O1''' , metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        } , )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple["torch.device", int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
__a = torch.device('''cpu''' )
__a = 0
elif is_torch_tpu_available():
__a = xm.xla_device()
__a = 0
else:
__a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__a = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> "torch.device":
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
return self.n_gpu > 0
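# Editor's note (hedged sketch): the deprecation shim in __init__ above turns
# e.g. `no_cuda=True` into `cuda=False`. Standalone illustration of the flip:
def _flip_deprecated(kwargs: dict) -> dict:
    out = dict(kwargs)
    for name in list(out):
        if name.startswith("no_"):
            out[name[3:]] = not out.pop(name)
    return out
assert _flip_deprecated({"no_cuda": True}) == {"cuda": False}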
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[str]:
'''simple docstring'''
__a = TFResNetModel(config=_snake_case )
__a = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = self.num_labels
__a = TFResNetForImageClassification(_snake_case )
__a = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A( a , a , unittest.TestCase ):
snake_case_ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
snake_case_ = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = TFResNetModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_snake_case )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
__a = model_class(_snake_case )
__a = model(**self._prepare_for_class(_snake_case , _snake_case ) )
__a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a = layer_type
__a = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __lowerCAmelCase ( ) -> Union[str, Any]:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_snake_case , return_tensors='''tf''' )
# forward pass
__a = model(**_snake_case )
# verify the logits
__a = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _snake_case )
__a = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) )
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions() -> list:
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def init_hf_modules() -> None:
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name ) -> None:
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
__a = f.read()
# Imports of the form `import .xxx`
__a = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , a__ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , a__ , flags=re.MULTILINE )
# Unique-ify
return list(set(a__ ) )
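# e.g. a module containing `from .pipeline_helper import X` yields ["pipeline_helper"] (hypothetical module name)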
def __lowerCAmelCase ( a__ ) -> Any:
__a = False
__a = [module_file]
__a = []
# Let's recurse through all relative imports
while not no_change:
__a = []
for f in files_to_check:
new_imports.extend(get_relative_imports(a__ ) )
__a = Path(a__ ).parent
__a = [str(module_path / m ) for m in new_imports]
__a = [f for f in new_import_files if f not in all_relative_imports]
__a = [F"""{f}.py""" for f in new_import_files]
__a = len(a__ ) == 0
all_relative_imports.extend(a__ )
return all_relative_imports
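# At this point the transitive closure of relative imports has been collected;
# the while-loop stops once an iteration discovers no new files.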
def __lowerCAmelCase ( a__ ) -> str:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
__a = f.read()
# Imports of the form `import xxx`
__a = re.findall(r'''^\s*import\s+(\S+)\s*$''' , a__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , a__ , flags=re.MULTILINE )
# Only keep the top-level module
__a = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
__a = list(set(a__ ) )
__a = []
for imp in imports:
try:
importlib.import_module(a__ )
except ImportError:
missing_packages.append(a__ )
if len(a__ ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F"""{', '.join(a__ )}. Run `pip install {' '.join(a__ )}`""" )
return get_relative_imports(a__ )
def __lowerCAmelCase ( a__ , a__ ) -> Dict:
__a = module_path.replace(os.path.sep , '''.''' )
__a = importlib.import_module(a__ )
if class_name is None:
return find_pipeline_class(a__ )
return getattr(a__ , a__ )
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
from ..pipelines import DiffusionPipeline
__a = dict(inspect.getmembers(a__ , inspect.isclass ) )
__a = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , a__ )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
F""" {loaded_module}.""" )
__a = cls
return pipeline_class
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , ) -> Tuple:
__a = str(a__ )
__a = os.path.join(a__ , a__ )
if os.path.isfile(a__ ):
__a = module_file_or_url
__a = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
__a = get_diffusers_versions()
# cut ".dev0"
__a = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
__a = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
__a = F"""v{revision}"""
elif revision == "main":
__a = revision
else:
raise ValueError(
F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
F""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
__a = COMMUNITY_PIPELINES_URL.format(revision=a__ , pipeline=a__ )
try:
__a = cached_download(
a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = '''git'''
__a = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
__a = hf_hub_download(
a__ , a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
__a = check_imports(a__ )
# Now we move the module inside our cached dynamic modules.
__a = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(a__ )
__a = Path(a__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a__ , submodule_path / module_file )
for module_needed in modules_needed:
__a = F"""{module_needed}.py"""
shutil.copy(os.path.join(a__ , a__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(a__ , a__ ):
__a = use_auth_token
elif use_auth_token is True:
__a = HfFolder.get_token()
else:
__a = None
__a = model_info(a__ , revision=a__ , token=a__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
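# Illustrative layout (hypothetical values): <HF_MODULES_CACHE>/diffusers_modules/<submodule>/<commit_hash>/<module_file>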
__a = submodule_path / commit_hash
__a = full_submodule + os.path.sep + commit_hash
create_dynamic_module(a__ )
if not (submodule_path / module_file).exists():
shutil.copy(a__ , submodule_path / module_file )
# Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a__ , F"""{module_needed}.py""" , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return os.path.join(a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ) -> Tuple:
__a = get_cached_module_file(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return get_class_in_module(a__ , final_module.replace('''.py''' , '''''' ) )
| 33
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A:
def __init__( self , _snake_case , _snake_case=2 , _snake_case=True , _snake_case=False , _snake_case=10 , _snake_case=3 , _snake_case=32 * 8 , _snake_case=32 * 8 , _snake_case=4 , _snake_case=64 , ) -> str:
'''simple docstring'''
__a = parent
__a = batch_size
__a = is_training
__a = use_auxiliary_loss
__a = num_queries
__a = num_channels
__a = min_size
__a = max_size
__a = num_labels
__a = hidden_dim
__a = hidden_dim
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_snake_case )
__a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_snake_case )
__a = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_snake_case ) > 0.5
).float()
__a = (torch.rand((self.batch_size, self.num_labels) , device=_snake_case ) > 0.5).long()
__a = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__a = self.num_queries
__a = self.num_labels
__a = [1, 1, 1, 1]
__a = self.num_channels
__a = 64
__a = 128
__a = self.hidden_dim
__a = self.hidden_dim
__a = self.hidden_dim
return config
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a , __a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str:
'''simple docstring'''
__a = output.encoder_hidden_states
__a = output.pixel_decoder_hidden_states
__a = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(_snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(_snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(_snake_case ) , config.decoder_layers )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=False ) -> Any:
'''simple docstring'''
with torch.no_grad():
__a = MaskaFormerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__a = model(pixel_values=_snake_case , pixel_mask=_snake_case )
__a = model(_snake_case , output_hidden_states=_snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = MaskaFormerForUniversalSegmentation(config=_snake_case )
model.to(_snake_case )
model.eval()
def comm_check_on_output(_snake_case ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__a = model(pixel_values=_snake_case , pixel_mask=_snake_case )
__a = model(_snake_case )
comm_check_on_output(_snake_case )
__a = model(
pixel_values=_snake_case , pixel_mask=_snake_case , mask_labels=_snake_case , class_labels=_snake_case )
comm_check_on_output(_snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A( a , a , unittest.TestCase ):
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = MaskaFormerModelTester(self )
__a = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_snake_case )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_snake_case )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__a = MaskaFormerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = (self.model_tester.min_size,) * 2
__a = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_snake_case ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_snake_case ),
'''class_labels''': torch.zeros(2 , 10 , device=_snake_case ).long(),
}
__a = self.model_tester.get_config()
__a = MaskaFormerForUniversalSegmentation(_snake_case ).to(_snake_case )
__a = model(**_snake_case )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_snake_case ).to(_snake_case )
__a = model(**_snake_case , output_attentions=_snake_case )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
if not self.model_tester.is_training:
return
__a = self.all_model_classes[1]
__a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = model_class(_snake_case )
model.to(_snake_case )
model.train()
__a = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case ).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.all_model_classes[1]
__a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = True
__a = True
__a = model_class(_snake_case ).to(_snake_case )
model.train()
__a = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case )
__a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A : Any = 1E-4
def __lowerCAmelCase ( ) -> int:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __A( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_snake_case )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(_snake_case , return_tensors='''pt''' ).to(_snake_case )
__a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_snake_case , (1, 3, 384, 384) )
with torch.no_grad():
__a = model(**_snake_case )
__a = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case ) )
__a = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case ) )
__a = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _snake_case , atol=_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_snake_case ).eval()
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(_snake_case , return_tensors='''pt''' ).to(_snake_case )
__a = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_snake_case , (1, 3, 384, 384) )
with torch.no_grad():
__a = model(**_snake_case )
# masks_queries_logits
__a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__a = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__a = torch.tensor(_snake_case ).to(_snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case ) )
# class_queries_logits
__a = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__a = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_snake_case ).eval()
__a = self.default_image_processor
__a = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
__a = inputs['''pixel_values'''].to(_snake_case )
__a = [el.to(_snake_case ) for el in inputs['''mask_labels''']]
__a = [el.to(_snake_case ) for el in inputs['''class_labels''']]
with torch.no_grad():
__a = model(**_snake_case )
self.assertTrue(outputs.loss is not None )
| 33
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A( a ):
snake_case_ = '''table-transformer'''
snake_case_ = ['''past_key_values''']
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=100 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
__a = backbone_config.get('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
# set timm attributes to None
__a , __a , __a = None, None, None
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return 12
| 33
| 1
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger('transformers.models.encodec')
A : List[Any] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
A : Dict = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
A : Dict = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
A : List[Any] = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
A : Dict = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
A : Dict = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
A : Any = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
A : Union[str, Any] = []
A : List[str] = []
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> Optional[int]:
for attribute in key.split('''.''' ):
__a = getattr(a__ , a__ )
if weight_type is not None:
__a = getattr(a__ , a__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
elif weight_type == "running_mean":
__a = value
elif weight_type == "running_var":
__a = value
elif weight_type == "num_batches_tracked":
__a = value
elif weight_type == "weight_ih_l0":
__a = value
elif weight_type == "weight_hh_l0":
__a = value
elif weight_type == "bias_ih_l0":
__a = value
elif weight_type == "bias_hh_l0":
__a = value
elif weight_type == "weight_ih_l1":
__a = value
elif weight_type == "weight_hh_l1":
__a = value
elif weight_type == "bias_ih_l1":
__a = value
elif weight_type == "bias_hh_l1":
__a = value
else:
__a = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def __lowerCAmelCase ( a__ , a__ ) -> Union[str, Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
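# Illustrative: should_ignore("encoder.foo.bar", ["encoder.*"]) -> True, since the trailing ".*" makes "encoder." a prefix match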
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
__a = []
if model_name == "encodec_24khz" or "encodec_32khz":
__a = MAPPING_24K
elif model_name == "encodec_48khz":
__a = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(a__ , a__ ):
logger.info(F"""{name} was ignored""" )
continue
__a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
__a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
__a = True
if "*" in mapped_key:
__a = name.split(a__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , a__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "weight_ih_l0" in name:
__a = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
__a = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
__a = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
__a = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
__a = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
__a = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
__a = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
__a = '''bias_hh_l1'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
__a = '''weight'''
elif "running_mean" in name:
__a = '''running_mean'''
elif "running_var" in name:
__a = '''running_var'''
elif "num_batches_tracked" in name:
__a = '''num_batches_tracked'''
else:
__a = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def __lowerCAmelCase ( a__ , a__ , a__ , a__=None , a__=None , ) -> Optional[Any]:
if config_path is not None:
__a = EncodecConfig.from_pretrained(a__ )
else:
__a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__a = [8, 5, 4, 4]
__a = [2.2]
__a = 64
__a = 3_2000
__a = 2048
__a = False
__a = False
__a = False
elif model_name == "encodec_48khz":
__a = [8, 5, 4, 2]
__a = [3.0, 6.0, 12.0, 24.0]
__a = 4_8000
__a = 2
__a = False
__a = '''time_group_norm'''
__a = True
__a = 1.0
__a = 0.01
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
__a = EncodecModel(a__ )
__a = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(a__ )
__a = torch.load(a__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__a = original_checkpoint['''best_state''']
recursively_load_weights(a__ , a__ , a__ )
model.save_pretrained(a__ )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(a__ )
model.push_to_hub(a__ )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
A : Tuple = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 33
|
import functools
def __lowerCAmelCase ( worda , wordb ) -> int:
__a = len(worda )
__a = len(wordb )
@functools.cache
def min_distance(indexa , indexb ) -> int:
# if the first word's index overflows - delete all remaining characters of the second word
if indexa >= len_worda:
return len_wordb - indexb
# if the second word's index overflows - delete all remaining characters of the first word
if indexb >= len_wordb:
return len_worda - indexa
__a = int(worda[indexa] != wordb[indexb] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
return min_distance(0 , 0 )
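# e.g. the edit distance between "kitten" and "sitting" is 3 (substitute k->s, substitute e->i, insert g)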
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
| 1
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
A : Optional[Any] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
A : List[str] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
A : str = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case=None , _snake_case=True , _snake_case=False ) -> Optional[Any]:
'''simple docstring'''
if rouge_types is None:
__a = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
__a = rouge_scorer.RougeScorer(rouge_types=_snake_case , use_stemmer=_snake_case )
if use_aggregator:
__a = scoring.BootstrapAggregator()
else:
__a = []
for ref, pred in zip(_snake_case , _snake_case ):
__a = scorer.score(_snake_case , _snake_case )
if use_aggregator:
aggregator.add_scores(_snake_case )
else:
scores.append(_snake_case )
if use_aggregator:
__a = aggregator.aggregate()
else:
__a = {}
for key in scores[0]:
__a = [score[key] for score in scores]
return result
| 33
|
import logging
from transformers.configuration_utils import PretrainedConfig
A : Union[str, Any] = logging.getLogger(__name__)
class __A( a ):
snake_case_ = '''masked_bert'''
def __init__( self , _snake_case=30_522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3_072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="topK" , _snake_case="constant" , _snake_case=0.0 , **_snake_case , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_snake_case , **_snake_case )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = pruning_method
__a = mask_init
__a = mask_scale
| 33
| 1
|
from collections import defaultdict
def __lowerCAmelCase ( a__ , a__ ) -> bool:
__a = first_str.lower().strip()
__a = second_str.lower().strip()
# Remove whitespace
__a = first_str.replace(''' ''' , '''''' )
__a = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(a__ ) != len(a__ ):
return False
# Default values for count should be 0
__a = defaultdict(int )
# For each character in the input strings, increment
# the count for the first and decrement it for the second
for i in range(len(a__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
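# e.g. check_anagrams("Silent", "Listen") -> True, check_anagrams("There", "Their") -> False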
if __name__ == "__main__":
from doctest import testmod
testmod()
A : Tuple = input('Enter the first string ').strip()
A : str = input('Enter the second string ').strip()
A : int = check_anagrams(input_a, input_b)
print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 33
|
import sys
def __lowerCAmelCase ( a__ ) -> Optional[int]:
__a = len(a__ )
__a = [[0 for x in range(a__ )] for x in range(a__ )]
__a = [[0 for x in range(a__ )] for x in range(a__ )]
for chain_length in range(2 , a__ ):
for a in range(1 , n - chain_length + 1 ):
__a = a + chain_length - 1
__a = sys.maxsize
for c in range(a__ , a__ ):
__a = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__a = cost
__a = c
return matrix, sol
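# The tabulation above runs in O(n^3) time and O(n^2) space for a chain of n matrices.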
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Any:
if i == j:
print('''A''' + str(a__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optimal_solution(a__ , a__ , optimal_solution[i][j] )
print_optimal_solution(a__ , optimal_solution[i][j] + 1 , a__ )
print(''')''' , end=''' ''' )
def __lowerCAmelCase ( ) -> int:
__a = [30, 35, 15, 5, 10, 20, 25]
__a = len(a__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
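# For this classic CLRS example the minimum cost is 15125 scalar multiplications.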
__a , __a = matrix_chain_order(a__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optimal_solution(a__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 33
| 1
|
class __A:
def __init__( self ) -> Any:
'''simple docstring'''
__a = {}
def SCREAMING_SNAKE_CASE_ ( self ) -> None:
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_snake_case , ''' -> ''' , ''' -> '''.join([str(_snake_case ) for j in self.vertex[i]] ) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> None:
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_snake_case )
else:
# else make a new vertex
__a = [to_vertex]
def SCREAMING_SNAKE_CASE_ ( self ) -> None:
'''simple docstring'''
__a = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> None:
'''simple docstring'''
__a = True
print(_snake_case , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_snake_case , _snake_case )
if __name__ == "__main__":
A : List[str] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : int = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['MobileViTFeatureExtractor']
A : str = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
A : Union[str, Any] = True
from torch.cuda.amp import autocast
A : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class __A:
snake_case_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
snake_case_ = field(
default=a , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
snake_case_ = field(
default=a , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
snake_case_ = field(
default=a , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
snake_case_ = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
snake_case_ = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
snake_case_ = field(
default=0.999_995 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def __lowerCAmelCase ( a__ , a__ ) -> Union[str, Any]:
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
__a = logging.WARNING
if model_args.verbose_logging:
__a = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__a = logging.INFO
logger.setLevel(a__ )
@dataclass
class __A:
snake_case_ = field(
default=a , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
snake_case_ = field(
default=a , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
snake_case_ = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
snake_case_ = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
snake_case_ = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
snake_case_ = field(
default=a , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
snake_case_ = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
snake_case_ = field(
default=a , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
snake_case_ = field(
default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class __A:
snake_case_ = 42
snake_case_ = 42
snake_case_ = "longest"
snake_case_ = None
snake_case_ = None
def __call__( self , _snake_case ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
__a = self.feature_extractor.pad(
_snake_case , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
__a = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
__a = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__a = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
__a = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations make sure that all values
# before the output length indices are attended to
__a = 1
__a = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
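# Illustrative: for an output length of 2 in a padded sequence of 5, the one-hot
# [0, 1, 0, 0, 0] becomes [1, 1, 0, 0, 0] after flip -> cumsum -> flip -> bool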
# sample randomly masked indices
__a = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_snake_case , min_masks=2 , )
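# mask_time_prob / mask_time_length control the SpecAugment-style span masking used by the contrastive pretraining objective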
return batch
class __A( a ):
def __init__( self , *_snake_case , _snake_case=1 , _snake_case=0 , _snake_case=1.0 , **_snake_case ) -> Any:
'''simple docstring'''
super().__init__(*_snake_case , **_snake_case )
__a = 0
__a = max_gumbel_temp
__a = min_gumbel_temp
__a = gumbel_temp_decay
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> torch.Tensor:
'''simple docstring'''
model.train()
__a = self._prepare_inputs(_snake_case )
if self.use_amp:
with autocast():
__a = self.compute_loss(_snake_case , _snake_case )
else:
__a = self.compute_loss(_snake_case , _snake_case )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__a = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__a = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__a = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_snake_case ).backward()
elif self.use_apex:
with amp.scale_loss(_snake_case , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_snake_case )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def __lowerCAmelCase ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__a , __a , __a = parser.parse_args_into_dataclasses()
configure_logger(a__ , a__ )
# Downloading and loading a dataset from the hub.
__a = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
__a = DatasetDict()
__a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
__a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
__a = DatasetDict()
__a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
__a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__a = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=a__ )
def prepare_dataset(a__ ):
# check that all files have the correct sampling rate
__a , __a = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
__a = datasets.map(
a__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
__a = vectorized_datasets.filter(
lambda a__ : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(a__ ):
return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
__a = vectorized_datasets.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__a = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
__a = WavaVecaForPreTraining(a__ )
__a = DataCollatorForWavaVecaPretraining(model=a__ , feature_extractor=a__ )
__a = WavaVecaPreTrainer(
model=a__ , data_collator=a__ , args=a__ , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=a__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
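
# A minimal sketch (an assumption about the trainer's internals, not verbatim
# library code) of how the three gumbel-temperature arguments passed above
# usually combine: the temperature starts at the maximum and decays
# multiplicatively each optimization step, floored at the minimum.
def gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    """Exponentially decayed Gumbel temperature with a lower bound."""
    return max(max_temp * decay**step, min_temp)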
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate the image via the affine transform between two point triples."""
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
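
# A small self-contained check of the affine-transform idea used above (a sketch,
# assuming OpenCV and NumPy are available): cv2.getAffineTransform returns the
# 2x3 matrix M that maps each of the three source points onto its destination
# point, i.e. M @ [x, y, 1]^T == [x', y']^T.
def _affine_maps_points() -> bool:
    src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    m = cv2.getAffineTransform(src, dst)  # shape (2, 3)
    mapped = (m @ np.c_[src, np.ones(3)].T).T  # apply M to homogeneous points
    return bool(np.allclose(mapped, dst, atol=1e-4))  # expected: True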
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
A : int = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
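
# Worked example for the helper above: it maps a pixel-space size to the latent
# grid size, rounding up so the grid covers the whole image (scale_factor=8, so
# one latent cell spans 8**2 = 64 pixels, and the result is in units of 8).
assert downscale_height_and_width(768, 768) == (96, 96)  # 768 / 64 = 12, times 8
assert downscale_height_and_width(700, 700) == (88, 88)  # ceil(700 / 64) = 11, times 8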
class __A( a ):
def __init__( self , _snake_case , _snake_case , _snake_case , ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_snake_case , scheduler=_snake_case , movq=_snake_case , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
'''simple docstring'''
if latents is None:
__a = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case , dtype=_snake_case )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__a = latents.to(_snake_case )
__a = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> int:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device(F"""cuda:{gpu_id}""" )
__a = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
__a = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__a = None
for cpu_offloaded_model in [self.unet, self.movq]:
__a , __a = cpu_offload_with_hook(_snake_case , _snake_case , prev_module_hook=_snake_case )
# We'll offload the last model manually.
__a = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_snake_case )
def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case = 512 , _snake_case = 512 , _snake_case = 100 , _snake_case = 4.0 , _snake_case = 1 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , ) -> Union[str, Any]:
'''simple docstring'''
__a = self._execution_device
__a = guidance_scale > 1.0
if isinstance(_snake_case , _snake_case ):
__a = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
__a = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
__a = torch.cat(_snake_case , dim=0 )
__a = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__a = image_embeds.repeat_interleave(_snake_case , dim=0 )
__a = negative_image_embeds.repeat_interleave(_snake_case , dim=0 )
__a = hint.repeat_interleave(_snake_case , dim=0 )
__a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
__a = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
self.scheduler.set_timesteps(_snake_case , device=_snake_case )
__a = self.scheduler.timesteps
__a = self.movq.config.latent_channels
__a , __a = downscale_height_and_width(_snake_case , _snake_case , self.movq_scale_factor )
# create initial latent
__a = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _snake_case , _snake_case , _snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = {'''image_embeds''': image_embeds, '''hint''': hint}
__a = self.unet(
sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0]
if do_classifier_free_guidance:
__a , __a = noise_pred.split(latents.shape[1] , dim=1 )
__a , __a = noise_pred.chunk(2 )
__a , __a = variance_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__a = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__a , __a = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(
_snake_case , _snake_case , _snake_case , generator=_snake_case , )[0]
# post-processing
__a = self.movq.decode(_snake_case , force_not_quantize=_snake_case )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__a = image * 0.5 + 0.5
__a = image.clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
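
# A minimal sketch of the classifier-free-guidance update performed inside
# __call__ above: the batch stacks [unconditional, conditional] predictions,
# and the guided prediction extrapolates away from the unconditional branch.
def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """`noise_pred` stacks [uncond, cond] along the batch dimension."""
    uncond, cond = noise_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)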
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
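
# Usage note: each returned combination is an ordered segmentation of the target
# string (result verified by hand for this classic example):
assert all_construct("purple", ["purp", "p", "ur", "le", "purpl"]) == [
    ["purp", "le"],
    ["p", "ur", "p", "le"],
]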
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
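
# A minimal, self-contained sketch of the lazy-import pattern wired up above
# (hypothetical demo names, not the real _LazyModule implementation): the
# submodule is only imported the first time one of its attributes is accessed,
# which keeps the package's top-level import cheap.
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)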
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
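
# A hypothetical usage sketch for the helpers above: handler methods are tagged
# with KEYMAP codes and `register` swaps in the KeyHandler metaclass, which
# builds the dispatch table that handle_input() consults. The key names used
# here ("up", "down", "newline") are assumptions about KEYMAP's contents.
# @register
# class DemoMenu:
#     @mark(KEYMAP["up"])
#     def move_up(cls):
#         return "up"
#
#     @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#     def advance(cls):
#         return "down-or-enter"
#
# DemoMenu.handle_input() then blocks on get_character() and routes the pressed
# key to the matching handler.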
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __A( a , a ):
snake_case_ = '''pixel_values'''
snake_case_ = False
snake_case_ = TimmBackboneConfig
def __init__( self , _snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , '''timm''' )
super().__init__(_snake_case )
__a = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(_snake_case , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
__a = getattr(_snake_case , '''use_pretrained_backbone''' , _snake_case )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
__a = config.out_indices if getattr(_snake_case , '''out_indices''' , _snake_case ) is not None else (-1,)
__a = timm.create_model(
config.backbone , pretrained=_snake_case , features_only=config.features_only , in_chans=config.num_channels , out_indices=_snake_case , **_snake_case , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__a = self._backbone.return_layers
__a = {layer['''module''']: str(_snake_case ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
__a = kwargs.pop('''config''' , TimmBackboneConfig() )
__a = kwargs.pop('''use_timm_backbone''' , _snake_case )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
__a = kwargs.pop('''num_channels''' , config.num_channels )
__a = kwargs.pop('''features_only''' , config.features_only )
__a = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
__a = kwargs.pop('''out_indices''' , config.out_indices )
__a = TimmBackboneConfig(
backbone=_snake_case , num_channels=_snake_case , features_only=_snake_case , use_pretrained_backbone=_snake_case , out_indices=_snake_case , )
return super()._from_config(_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__a = self._all_layers
__a = self._backbone(_snake_case , **_snake_case )
__a = self._return_layers
__a = tuple(hidden_states[i] for i in self.out_indices )
else:
__a = self._backbone(_snake_case , **_snake_case )
__a = None
__a = tuple(_snake_case )
__a = tuple(_snake_case ) if hidden_states is not None else None
if not return_dict:
__a = (feature_maps,)
if output_hidden_states:
__a = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_snake_case , hidden_states=_snake_case , attentions=_snake_case )
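
# A short sketch of the timm call that backs the wrapper above (assuming timm
# and torch are installed): with `features_only=True`, the created model's
# forward pass returns one feature map per requested stage index.
# import timm, torch
# backbone = timm.create_model("resnet18", pretrained=False, features_only=True, out_indices=(1, 2, 3, 4))
# feats = backbone(torch.randn(1, 3, 224, 224))  # list of 4 tensors, spatially coarser as the index grows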
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
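
# Usage example: slowsort sorts the sequence in place and returns None.
example = [8, 3, 5, 1, 9]
slowsort(example)
assert example == [1, 3, 5, 8, 9]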
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
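
# Usage example: three points on the line y = x (z = 0) are collinear; nudging
# the third point off the line makes the spanning vectors' cross product nonzero.
assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (2.0, 2.0, 0.0))
assert not are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (2.0, 2.0, 1.0))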
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , _snake_case = 3 , _snake_case = 1 , _snake_case = 1 , _snake_case = "relu" , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__a = tf.keras.layers.ConvaD(
filters=_snake_case , kernel_size=_snake_case , strides=_snake_case , padding='''VALID''' , groups=_snake_case , use_bias=_snake_case , name='''convolution''' , )
__a = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
__a = ACTaFN[activation] if activation is not None else tf.identity
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
'''simple docstring'''
__a = self.convolution(self.padding(_snake_case ) )
__a = self.normalization(_snake_case )
__a = self.activation(_snake_case )
return hidden_state
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , **_snake_case ) -> str:
'''simple docstring'''
super().__init__(**_snake_case )
__a = config.num_channels
__a = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = shape_list(_snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__a = tf.transpose(_snake_case , perm=(0, 2, 3, 1) )
__a = self.embedder(_snake_case )
return hidden_state
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , _snake_case = 2 , **_snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = tf.keras.layers.ConvaD(
filters=_snake_case , kernel_size=1 , strides=_snake_case , use_bias=_snake_case , name='''convolution''' )
__a = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = False ) -> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(_snake_case ) , training=_snake_case )
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , _snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case , name='''pooler''' )
__a = [
tf.keras.layers.ConvaD(filters=_snake_case , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_snake_case , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = self.pooler(_snake_case )
for layer_module in self.attention:
__a = layer_module(_snake_case )
__a = hidden_state * pooled
return hidden_state
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 1 , **_snake_case ) -> List[Any]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = in_channels != out_channels or stride != 1
__a = max(1 , out_channels // config.groups_width )
__a = (
TFRegNetShortCut(_snake_case , stride=_snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__a = [
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=_snake_case , name='''layer.2''' ),
]
__a = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
'''simple docstring'''
__a = hidden_state
for layer_module in self.layers:
__a = layer_module(_snake_case )
__a = self.shortcut(_snake_case )
hidden_state += residual
__a = self.activation(_snake_case )
return hidden_state
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 1 , **_snake_case ) -> Any:
'''simple docstring'''
super().__init__(**_snake_case )
__a = in_channels != out_channels or stride != 1
__a = max(1 , out_channels // config.groups_width )
__a = (
TFRegNetShortCut(_snake_case , stride=_snake_case , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
__a = [
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_snake_case , kernel_size=1 , activation=_snake_case , name='''layer.3''' ),
]
__a = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = hidden_state
for layer_module in self.layers:
__a = layer_module(_snake_case )
__a = self.shortcut(_snake_case )
hidden_state += residual
__a = self.activation(_snake_case )
return hidden_state
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 2 , _snake_case = 2 , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
__a = [
# downsampling is done in the first layer with stride of 2
layer(_snake_case , _snake_case , _snake_case , stride=_snake_case , name='''layers.0''' ),
*[layer(_snake_case , _snake_case , _snake_case , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
for layer_module in self.layers:
__a = layer_module(_snake_case )
return hidden_state
class __A( tf.keras.layers.Layer ):
def __init__( self , _snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
super().__init__(**_snake_case )
__a = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
__a = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_snake_case , _snake_case , _snake_case , depth=_snake_case , name=F"""stages.{i+1}""" ) )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = False , _snake_case = True ) -> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
__a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__a = hidden_states + (hidden_state,)
__a = stage_module(_snake_case )
if output_hidden_states:
__a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_snake_case , hidden_states=_snake_case )
@keras_serializable
class __A( tf.keras.layers.Layer ):
snake_case_ = RegNetConfig
def __init__( self , _snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_snake_case )
__a = config
__a = TFRegNetEmbeddings(_snake_case , name='''embedder''' )
__a = TFRegNetEncoder(_snake_case , name='''encoder''' )
__a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case , name='''pooler''' )
@unpack_inputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.embedder(_snake_case , training=_snake_case )
__a = self.encoder(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case )
__a = encoder_outputs[0]
__a = self.pooler(_snake_case )
# Change to NCHW output format have uniformity in the modules
__a = tf.transpose(_snake_case , perm=(0, 3, 1, 2) )
__a = tf.transpose(_snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__a = tuple([tf.transpose(_snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case , pooler_output=_snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __A( a ):
snake_case_ = RegNetConfig
snake_case_ = '''regnet'''
snake_case_ = '''pixel_values'''
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
A : Tuple = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
A : str = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , a , )
class __A( a ):
def __init__( self , _snake_case , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
super().__init__(_snake_case , *_snake_case , **_snake_case )
__a = TFRegNetMainLayer(_snake_case , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.regnet(
pixel_values=_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , a , )
class __A( a , a ):
def __init__( self , _snake_case , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
super().__init__(_snake_case , *_snake_case , **_snake_case )
__a = config.num_labels
__a = TFRegNetMainLayer(_snake_case , name='''regnet''' )
# classification head
__a = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.regnet(
_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case , training=_snake_case )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier[0](_snake_case )
__a = self.classifier[1](_snake_case )
__a = None if labels is None else self.hf_compute_loss(labels=_snake_case , logits=_snake_case )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states )
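
# A small sketch of the layout conversion used throughout the model above:
# inputs arrive channels-first (NCHW) for parity with the PyTorch checkpoints,
# while tf.keras convolutions expect channels-last (NHWC), so tensors are
# transposed on the way in and transposed back on the way out.
x_nchw = tf.random.uniform((1, 3, 224, 224))
x_nhwc = tf.transpose(x_nchw, perm=(0, 2, 3, 1))  # -> (1, 224, 224, 3)
x_back = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))  # -> (1, 3, 224, 224)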
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
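
# A minimal sketch of the attribute walk that set_recursively performs above: a
# dotted checkpoint key such as "encoder.layers.0.attention.k_proj" is resolved
# one segment at a time with getattr before the tensor is copied in (PyTorch's
# ModuleList registers children under string indices, so getattr(m, "0") works).
def resolve_dotted(root, dotted_key: str):
    obj = root
    for attribute in dotted_key.split("."):
        obj = getattr(obj, attribute)
    return obj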
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
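
# Usage sketch: overriding text_column renames the dataset column that the task
# template maps onto the canonical "text" field.
_demo_task = LanguageModeling(text_column="content")
assert _demo_task.column_mapping == {"content": "text"}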
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
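
# A minimal sketch of the label-masking idiom used in _map_to_encoder_decoder_inputs
# above: positions holding the pad token are replaced with -100, the target index
# that PyTorch's cross-entropy loss ignores, so padding never contributes to the loss.
def mask_pad_labels(label_ids, pad_token_id):
    return [-100 if token == pad_token_id else token for token in label_ids]


assert mask_pad_labels([5, 7, 0, 0], pad_token_id=0) == [5, 7, -100, -100]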
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
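
# Usage examples (values checked by hand):
assert roman_to_int("MMXIV") == 2014  # M + M + X + IV
assert int_to_roman(1994) == "MCMXCIV"  # M + CM + XC + IV
assert roman_to_int(int_to_roman(3549)) == 3549  # round trip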
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def method_1(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
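
# Worked check: integrating f(x) = x^2 over [0, 1] with 10 steps gives
# y ≈ 0.335, close to the exact value 1/3; the residual is the trapezoid-rule
# discretization error, which shrinks as `steps` grows.
assert abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 5e-3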
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
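
# A minimal sketch of the `to_kwargs` contract exercised in test_kwargs_handler
# above (an assumption about intent, not accelerate's verbatim implementation):
# only fields that differ from their dataclass defaults are emitted.
from dataclasses import fields as _dc_fields


def to_kwargs_sketch(handler):
    return {
        f.name: getattr(handler, f.name)
        for f in _dc_fields(handler)
        if getattr(handler, f.name) != f.default
    }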
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = mock.Mock()
__a = 500
__a = {}
__a = HTTPError
__a = {}
# Download this model to make sure it's in the cache.
__a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_snake_case ) as mock_head:
__a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(_snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
__a = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_snake_case )
@is_staging_test
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Optional[Any]:
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> str:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = ViTImageProcessor.from_pretrained(_snake_case )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_snake_case , repo_id='''test-image-processor''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = ViTImageProcessor.from_pretrained(_snake_case )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_snake_case , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
__a = CustomImageProcessor.from_pretrained(_snake_case )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
__a = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
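    # Note: the three tests above exercise the same round-trip pattern --
    # push_to_hub() (or save_pretrained(..., push_to_hub=True)) followed by
    # from_pretrained() -- against a user repo, an org repo, and a repo whose
    # processor class lives in a dynamic module registered via
    # register_for_auto_class().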
| 33
|
import os
# Precomputes a list of the 100 first triangular numbers
A : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def __lowerCAmelCase ( ) -> Tuple:
    __a = os.path.dirname(os.path.realpath(__file__ ) )  # script_dir
    __a = os.path.join(script_dir , '''words.txt''' )  # words_file_path
    __a = ''''''
    with open(words_file_path ) as f:
__a = f.readline()
__a = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
__a = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
    return len(words )
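# Worked example (a minimal standalone sketch of the scoring rule used above):
# a word's value is the sum of its letters' alphabet positions, and "SKY"
# scores 19 + 11 + 25 = 55 = 10 * 11 // 2, the 10th triangular number.
assert sum(ord(ch) - 64 for ch in "SKY") == 55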
if __name__ == "__main__":
print(solution())
| 33
| 1
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
A : Dict = parser.parse_args()
    A : Tuple = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
A : List[Any] = CLIPImageProcessor()
A : Any = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
A : Dict = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
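    # Hypothetical invocation (the script name and output path are placeholders):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./karlo-image-variation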
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A : str = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['PerceiverFeatureExtractor']
A : int = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
def __lowerCAmelCase ( a__ ) -> list:
for i in range(len(a__ ) - 1 , 0 , -1 ):
__a = False
        for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
__a , __a = unsorted[j - 1], unsorted[j]
__a = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
__a , __a = unsorted[j + 1], unsorted[j]
__a = True
if not swapped:
break
return unsorted
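# Expected behaviour of the bidirectional pass above (a sketch; the function is
# referenced below as cocktail_shaker_sort): [4, 5, 2, 1, 2] -> [1, 2, 2, 4, 5].
# Sorting is in place, so the returned list is the same object that was passed in.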
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
A : Optional[int] = [int(item) for item in user_input.split(',')]
print(F"{cocktail_shaker_sort(unsorted) = }")
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
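# Sketch of the lazy-import pattern above: `_LazyModule` registers the names in
# `_import_structure` so that, e.g., `from transformers.models.roberta import
# RobertaModel` only imports `modeling_roberta` (and thus torch) on first access.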
| 33
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __A( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=400 , _snake_case=True , _snake_case=32 , _snake_case=True , ) -> List[str]:
'''simple docstring'''
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size_divisor
__a = do_rescale
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __A( a , unittest.TestCase ):
snake_case_ = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(_snake_case , '''size_divisor''' ) )
self.assertTrue(hasattr(_snake_case , '''resample''' ) )
self.assertTrue(hasattr(_snake_case , '''do_rescale''' ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 33
|
from string import ascii_uppercase
A : Optional[int] = {char: i for i, char in enumerate(ascii_uppercase)}  # referenced below as dicta: 'A' -> 0, ..., 'Z' -> 25
A : Union[str, Any] = dict(enumerate(ascii_uppercase))  # referenced below as dictb: 0 -> 'A', ..., 25 -> 'Z'
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = len(a__ )
__a = 0
while True:
if x == i:
__a = 0
if len(a__ ) == len(a__ ):
break
key += key[i]
i += 1
return key
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = ''''''
__a = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__a = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
            cipher_text += dictb[x]
return cipher_text
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = ''''''
__a = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__a = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
            or_txt += dictb[x]
return or_txt
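# Worked example of the key extension performed by generate_key() above: for a
# 17-character message and the key "SECRET", the key is cycled out to length 17.
assert ("SECRET" * 3)[:17] == "SECRETSECRETSECRE"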
def __lowerCAmelCase ( ) -> None:
    __a = '''THE GERMAN ATTACK'''
    __a = '''SECRET'''
    __a = generate_key(message , key )
    __a = cipher_text(message , key_new )
    print(F"""Encrypted Text = {s}""" )
    print(F"""Original Text = {original_text(s , key_new )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 33
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
A : Dict = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __lowerCAmelCase ( a__ ) -> str:
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True )
def __lowerCAmelCase ( a__ , a__ ) -> Tuple:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
__a = tmp_path_factory.getbasetemp() / '''cache'''
__a = test_hf_cache_home / '''datasets'''
__a = test_hf_cache_home / '''metrics'''
__a = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(a__ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(a__ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(a__ ) )
__a = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(a__ ) )
__a = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(a__ ) )
@pytest.fixture(autouse=True , scope='''session''' )
def __lowerCAmelCase ( ) -> Dict:
datasets.disable_progress_bar()
@pytest.fixture(autouse=a__ )
def __lowerCAmelCase ( a__ ) -> Dict:
# don't take tests into account when counting downloads
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def __lowerCAmelCase ( a__ ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
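# With the fixtures above, e.g. `pytest -m unit` selects the default-marked
# tests, and every test sees HF_DATASETS_CACHE / HF_METRICS_CACHE /
# HF_MODULES_CACHE redirected into a session-scoped temporary directory
# (a description of the intended behaviour, not additional configuration).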
| 33
|
import argparse
import torch
from transformers import GPT2LMHeadModel as GPTaLMHeadModel, RobertaForMaskedLM  # alias keeps the mangled name used below importable
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
A : str = parser.parse_args()
if args.model_type == "roberta":
A : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
A : Any = 'roberta'
elif args.model_type == "gpt2":
A : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
A : List[str] = 'transformer'
A : Dict = model.state_dict()
A : Any = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
A : Union[str, Any] = state_dict[F"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
A : Any = F"{prefix}.embeddings.{w}.weight"
A : Union[str, Any] = state_dict[param_name]
for w in ["weight", "bias"]:
A : List[Any] = F"{prefix}.embeddings.LayerNorm.{w}"
A : List[str] = state_dict[param_name]
# Transformer Blocks #
A : Optional[int] = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
A : Any = state_dict[
F"{prefix}.h.{teacher_idx}.{layer}.{w}"
]
A : List[str] = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
A : Optional[int] = state_dict[F"{layer}"]
if args.vocab_transform:
for w in ["weight", "bias"]:
A : List[Any] = state_dict[F"lm_head.dense.{w}"]
A : List[str] = state_dict[F"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
A : List[str] = state_dict[F"{prefix}.ln_f.{w}"]
A : Dict = state_dict['lm_head.weight']
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
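    # Hypothetical invocation (the script name is a placeholder):
    #   python extract_for_distillation.py --model_type roberta --model_name roberta-large \
    #       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform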
| 33
| 1
|
import operator as op
A : int = 'scaler.pt'
A : List[str] = 'pytorch_model'
A : List[str] = 'random_states'
A : List[str] = 'optimizer'
A : Dict = 'scheduler'
A : Optional[Any] = 'pytorch_model.bin'
A : Any = 'pytorch_model.bin.index.json'
A : Any = 'model.safetensors'
A : Union[str, Any] = 'model.safetensors.index.json'
A : Union[str, Any] = '1.10.2'
A : List[Any] = 'py38'
A : int = '4.17.0'
A : List[Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
A : List[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
A : List[Any] = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
A : Dict = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
A : Dict = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
A : Dict = '2.0.1'
A : str = ['pdsh', 'standard', 'openmpi', 'mvapich']
A : int = ['default', 'reduce-overhead', 'max-autotune']
A : Optional[Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
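# e.g. the map above lets a string such as ">=" drive a comparison; the same
# check written directly with the operator module (a standalone sketch):
assert op.ge((1, 10, 2), (1, 10, 0))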
# These are the args for `torch.distributed.launch` for pytorch < 1.9
A : int = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
A : Tuple = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
A : Optional[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 33
|
import os
import numpy
import onnx
def __lowerCAmelCase ( a__ , a__ ) -> List[str]:
__a = a.name
__a = b.name
__a = ''''''
__a = ''''''
__a = a == b
__a = name_a
__a = name_b
return res
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
            node_proto.input.insert(i , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> str:
for n in graph_proto.node:
        _node_replace_input_with(n , a__ , a__ )
def __lowerCAmelCase ( a__ , a__ , a__ ) -> Union[str, Any]:
__a = list(model.graph.initializer )
__a = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
        __a = inits[i].name  # name_i
        __a = inits[ref_i].name  # name_ref
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def __lowerCAmelCase ( a__ ) -> str:
    __a = os.path.dirname(a__ )  # model_file_folder
    __a = os.path.basename(a__ )  # model_file_name
    __a = onnx.load(os.path.join(model_file_folder , model_file_name ) )
__a = list(model.graph.initializer )
__a = set()
__a = {}
__a = []
__a = 0
    for i in range(len(inits ) ):
if i in dup_set:
continue
        for j in range(i + 1 , len(inits ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
__a = inits[j].data_type
__a = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
                    print('''unexpected data type: ''' , dtype )
total_reduced_size += mem_size
__a = inits[i].name
__a = inits[j].name
if name_i in dup_map:
                    dup_map[name_i].append(name_j )
else:
__a = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
    __a = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    __a = '''optimized_''' + model_file_name  # new_model_file_name
    __a = os.path.join(model_file_folder , new_model_file_name )  # new_model
    onnx.save(model , new_model )
return new_model
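# Hypothetical usage (assuming the public entry point keeps the behaviour above):
#   new_path = remove_dup_initializers("exported/model.onnx")
# would write "exported/optimized_model.onnx" and return its path.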
| 33
| 1
|
def __lowerCAmelCase ( a__ , a__ ) -> Any:
__a = [1]
for i in range(2 , a__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__a = []
__a = list(range(a__ ) )
# Find permutation
while factorials:
        __a = factorials.pop()  # factorial
        __a , __a = divmod(a__ , factorial )  # number, remaining k
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
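# Factoradic walk-through of the loop above (a standalone sketch): for n = 4 and
# k = 5, the factorials are [1, 2, 6]; divmod peels off one index per step:
# divmod(5, 6) -> (0, 5), divmod(5, 2) -> (2, 1), divmod(1, 1) -> (1, 0),
# selecting elements 0, 3, 2 and leaving 1, i.e. the permutation [0, 3, 2, 1].
assert divmod(5, 6) == (0, 5) and divmod(5, 2) == (2, 1) and divmod(1, 1) == (1, 0)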
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Dict = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 1
|
def __lowerCAmelCase ( a__ , a__ ) -> str:
    if not (isinstance(a__ , str ) and isinstance(a__ , str )):
        raise ValueError('''longest_common_substring() takes two strings for inputs''' )
    __a = len(texta )
    __a = len(textb )
    __a = [[0] * (textb_length + 1) for _ in range(texta_length + 1 )]
    __a = 0
    __a = 0
    for i in range(1 , texta_length + 1 ):
        for j in range(1 , textb_length + 1 ):
            if texta[i - 1] == textb[j - 1]:
                __a = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    __a = i
                    __a = dp[i][j]
    return texta[ans_index - ans_length : ans_index]
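# e.g. for "abcdef" and "xabded" the dp table peaks at length 2 when the "ab"
# prefixes align, so the function returns "ab" (a worked example, assuming the
# two-string signature above).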
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
A : str = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
A : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCAmelCase ( ) -> Tuple:
    __a = '''https://pypi.org/pypi/diffusers/json'''  # url
    __a = json.loads(request.urlopen(url ).read() )['''releases'''].keys()  # releases
    return sorted(releases , key=lambda a__ : version.Version(a__ ) )
def __lowerCAmelCase ( ) -> List[Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    __a = Path(HF_MODULES_CACHE ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
init_hf_modules()
    __a = Path(HF_MODULES_CACHE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
__a = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def __lowerCAmelCase ( a__ ) -> Dict:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
        __a = f.read()  # content
    # Imports of the form `import .xxx`
    __a = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , content , flags=re.MULTILINE )
# Unique-ify
return list(set(a__ ) )
def __lowerCAmelCase ( a__ ) -> Any:
__a = False
__a = [module_file]
__a = []
# Let's recurse through all relative imports
while not no_change:
__a = []
for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
__a = Path(a__ ).parent
__a = [str(module_path / m ) for m in new_imports]
__a = [f for f in new_import_files if f not in all_relative_imports]
__a = [F"""{f}.py""" for f in new_import_files]
        __a = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
return all_relative_imports
def __lowerCAmelCase ( a__ ) -> str:
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
        __a = f.read()  # content
    # Imports of the form `import xxx`
    __a = re.findall(r'''^\s*import\s+(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , content , flags=re.MULTILINE )
# Only keep the top-level module
__a = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
    __a = list(set(imports ) )
    __a = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F"""{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`""" )
return get_relative_imports(a__ )
def __lowerCAmelCase ( a__ , a__ ) -> Dict:
    __a = module_path.replace(os.path.sep , '''.''' )
    __a = importlib.import_module(module_path )  # module
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
from ..pipelines import DiffusionPipeline
__a = dict(inspect.getmembers(a__ , inspect.isclass ) )
__a = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
F""" {loaded_module}.""" )
__a = cls
return pipeline_class
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , ) -> Tuple:
__a = str(a__ )
__a = os.path.join(a__ , a__ )
if os.path.isfile(a__ ):
__a = module_file_or_url
__a = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
__a = get_diffusers_versions()
# cut ".dev0"
__a = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
__a = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
__a = F"""v{revision}"""
elif revision == "main":
__a = revision
else:
raise ValueError(
F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
F""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
        __a = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
try:
__a = cached_download(
a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = '''git'''
__a = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
__a = hf_hub_download(
a__ , a__ , cache_dir=a__ , force_download=a__ , proxies=a__ , resume_download=a__ , local_files_only=a__ , use_auth_token=a__ , )
__a = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
__a = check_imports(a__ )
# Now we move the module inside our cached dynamic modules.
__a = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    __a = Path(HF_MODULES_CACHE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a__ , submodule_path / module_file )
for module_needed in modules_needed:
__a = F"""{module_needed}.py"""
shutil.copy(os.path.join(a__ , a__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
__a = use_auth_token
elif use_auth_token is True:
__a = HfFolder.get_token()
else:
__a = None
        __a = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha  # commit_hash
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__a = submodule_path / commit_hash
__a = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
if not (submodule_path / module_file).exists():
shutil.copy(a__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a__ , F"""{module_needed}.py""" , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
    return os.path.join(full_submodule , module_file )
def __lowerCAmelCase ( a__ , a__ , a__ = None , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ) -> Tuple:
__a = get_cached_module_file(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
return get_class_in_module(a__ , final_module.replace('''.py''' , '''''' ) )
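# Hypothetical usage (repo and file names are placeholders): load a community
# pipeline class through the machinery above with
#   get_class_from_dynamic_module("some-user/custom-pipeline", "pipeline.py")
# which caches the module under HF_MODULES_CACHE and imports the class from it.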
| 33
| 1
|
import string
from math import log10 as logaa  # aliased to the mangled name used below
def __lowerCAmelCase ( a__ , a__ ) -> int:
__a = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
__a = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __lowerCAmelCase ( a__ , a__ ) -> tuple[int, int]:
__a = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
__a = corpus_without_punctuation.split('''\n''' )
__a = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def __lowerCAmelCase ( a__ , a__ , a__=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def __lowerCAmelCase ( a__ , a__ ) -> float:
return round(tf * idf , 3 )
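# Worked example of the formulas above: a term occurring in 1 of 3 documents has
# idf = round(log10(3 / 1), 3) = 0.477, and with tf = 2 the combined score is
# tf_idf(2, 0.477) = 0.954 (a standalone check):
assert round(logaa(3 / 1), 3) == 0.477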
| 33
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A( a ):
snake_case_ = '''table-transformer'''
snake_case_ = ['''past_key_values''']
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=100 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
__a = backbone_config.get('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
# set timm attributes to None
__a , __a , __a = None, None, None
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return 12
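# e.g. TableTransformerConfig() defaults to a 6-layer, 8-head encoder/decoder
# with d_model=256 over a timm ResNet-50 backbone, and the ONNX config above
# exports pixel_values/pixel_mask with dynamic batch, channel and spatial axes
# (a summary of the defaults above, not additional configuration).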
| 33
| 1