"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowerCAmelCase (_UpperCamelCase ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_UpperCamelCase , '_dynamo' ):
return False
return isinstance(_UpperCamelCase , torch._dynamo.eval_frame.OptimizedModule )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = True ):
__lowerCAmelCase : List[str] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowerCAmelCase : str = is_compiled_module(_UpperCamelCase )
if is_compiled:
__lowerCAmelCase : List[str] = model
__lowerCAmelCase : Optional[int] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = model.module
if not keep_fpaa_wrapper:
__lowerCAmelCase : Optional[Any] = getattr(_UpperCamelCase , 'forward' )
__lowerCAmelCase : List[str] = model.__dict__.pop('_original_forward' , _UpperCamelCase )
if original_forward is not None:
while hasattr(_UpperCamelCase , '__wrapped__' ):
__lowerCAmelCase : List[str] = forward.__wrapped__
if forward == original_forward:
break
__lowerCAmelCase : Dict = forward
if getattr(_UpperCamelCase , '_converted_to_transformer_engine' , _UpperCamelCase ):
convert_model(_UpperCamelCase , to_transformer_engine=_UpperCamelCase )
if is_compiled:
__lowerCAmelCase : Dict = model
__lowerCAmelCase : Tuple = compiled_model
return model
def __lowerCAmelCase ():
PartialState().wait_for_everyone()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_UpperCamelCase , _UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(_UpperCamelCase , _UpperCamelCase )
@contextmanager
def __lowerCAmelCase (**_UpperCamelCase ):
for key, value in kwargs.items():
__lowerCAmelCase : List[str] = str(_UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowerCAmelCase (_UpperCamelCase ):
if not hasattr(_UpperCamelCase , '__qualname__' ) and not hasattr(_UpperCamelCase , '__name__' ):
__lowerCAmelCase : Optional[Any] = getattr(_UpperCamelCase , '__class__' , _UpperCamelCase )
if hasattr(_UpperCamelCase , '__qualname__' ):
return obj.__qualname__
if hasattr(_UpperCamelCase , '__name__' ):
return obj.__name__
return str(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
for key, value in source.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = destination.setdefault(_UpperCamelCase , {} )
merge_dicts(_UpperCamelCase , _UpperCamelCase )
else:
__lowerCAmelCase : Optional[Any] = value
return destination
def __lowerCAmelCase (_UpperCamelCase = None ):
if port is None:
__lowerCAmelCase : Tuple = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
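# A minimal usage sketch (an illustration, not part of the original module),
# assuming the helpers above are importable as `accelerate.utils`:
#
#     import os
#     import torch
#     from accelerate.utils import extract_model_from_parallel, patch_environment
#
#     with patch_environment(master_addr="127.0.0.1"):
#         assert os.environ["MASTER_ADDR"] == "127.0.0.1"
#     assert "MASTER_ADDR" not in os.environ  # removed on exit
#
#     wrapped = torch.nn.DataParallel(torch.nn.Linear(4, 4))
#     assert isinstance(extract_model_from_parallel(wrapped), torch.nn.Linear)
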
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int]= False
_a : int= False
def __UpperCAmelCase ( UpperCAmelCase_ : Namespace ) -> Optional[Any]:
'''simple docstring'''
return TrainCommand(UpperCAmelCase_ )
class UpperCamelCase ( lowercase ):
@staticmethod
def _lowercase (_A : ArgumentParser) -> Any:
__snake_case : Any = parser.add_parser('train' , help='CLI tool to train a model on a task.')
train_parser.add_argument(
'--train_data' , type=_A , required=_A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=_A , default=0 , help='Column of the dataset csv file with example labels.')
train_parser.add_argument(
'--column_text' , type=_A , default=1 , help='Column of the dataset csv file with example texts.')
train_parser.add_argument(
'--column_id' , type=_A , default=2 , help='Column of the dataset csv file with example ids.')
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).')
train_parser.add_argument('--validation_data' , type=_A , default='' , help='path to validation dataset.')
train_parser.add_argument(
'--validation_split' , type=_A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=_A , default='./' , help='path to saved the trained model.')
train_parser.add_argument(
'--task' , type=_A , default='text_classification' , help='Task to train the model on.')
train_parser.add_argument(
'--model' , type=_A , default='bert-base-uncased' , help='Model\'s name or path to stored model.')
train_parser.add_argument('--train_batch_size' , type=_A , default=32 , help='Batch size for training.')
train_parser.add_argument('--valid_batch_size' , type=_A , default=64 , help='Batch size for validation.')
train_parser.add_argument('--learning_rate' , type=_A , default=3E-5 , help='Learning rate.')
train_parser.add_argument('--adam_epsilon' , type=_A , default=1E-08 , help='Epsilon for Adam optimizer.')
train_parser.set_defaults(func=_A)
def __init__(self : int , _A : Namespace) -> Tuple:
__snake_case : Optional[int] = logging.get_logger('transformers-cli/training')
__snake_case : Optional[int] = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=_A)
__snake_case : List[Any] = args.output
__snake_case : Any = args.column_label
__snake_case : str = args.column_text
__snake_case : Any = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
__snake_case : List[str] = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
__snake_case : List[Any] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
__snake_case : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = args.validation_split
__snake_case : str = args.train_batch_size
__snake_case : Any = args.valid_batch_size
__snake_case : Union[str, Any] = args.learning_rate
__snake_case : str = args.adam_epsilon
def _lowercase (self : List[str]) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _lowercase (self : str) -> int:
raise NotImplementedError
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
"""Convert MobileViTV2 checkpoints from the Apple ml-cvnets library to the Hugging Face format."""
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Remove unused keys (e.g.: seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the model's weights into our MobileViTV2 structure."""
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on. "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )

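# Worked example of the rename rules above (the key is hypothetical, chosen only
# to illustrate how the replacements compose):
#
#   "encoder.conv_1.block.conv.weight"
#     -> strip "encoder."                   : "conv_1.block.conv.weight"
#     -> ".block." -> "."                   : "conv_1.conv.weight"
#     -> ".conv."  -> ".convolution."       : "conv_1.convolution.weight"
#     -> "conv_1." -> prefix + "conv_stem." : "mobilevitv2.conv_stem.convolution.weight"
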
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the position of the highest set bit of ``number``
    (equivalent to ``number.bit_length()`` for non-negative integers).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()

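# Cross-check against the standard library (illustration only): for non-negative
# integers, the shift loop above agrees with the built-in int.bit_length().
#
#     assert all(get_highest_set_bit_position(i) == i.bit_length() for i in range(1024))
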
"""simple docstring"""
import sys
snake_case__ : str = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def _snake_case ( _snake_case : str = N ):
lowerCAmelCase : int = -sys.maxsize - 1
for i in range(len(_snake_case ) - 12 ):
lowerCAmelCase : Optional[Any] = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
lowerCAmelCase : Any = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
"""Common test utilities for transformers tools (agents)."""
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

"""Solve the N queens problem with a depth-first search (backtracking)."""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """
    >>> boards = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain the same value, because that
        # would mean a vertical collision. Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective variables (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)

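# For reference, running this module prints the two 4x4 solutions row by row,
# followed by the count (output shown approximately):
#
#     . Q . .
#     . . . Q
#     Q . . .
#     . . Q .
#     ...
#     2 solutions were found.
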
"""Tests for the Kandinsky 2.2 inpainting pipeline (diffusers)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

"""simple docstring"""
import math
def __a ( __lowerCamelCase, __lowerCamelCase ):
if (
not isinstance(__lowerCamelCase, (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def __a ( __lowerCamelCase, __lowerCamelCase ):
if (
not isinstance(__lowerCamelCase, (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
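# Quick numeric check (illustration only): for an apparent power of 100 VA at a
# power factor of 0.9, P = S * pf and Q = S * sqrt(1 - pf**2):
#
#     real_power(100, 0.9)       # 90.0 W
#     reactive_power(100, 0.9)   # ~43.589 VAR
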
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , lowercase_=2 , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase_ : int = (image_size // patch_size) ** 2
UpperCAmelCase_ : Optional[Any] = num_patches + 2
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = DeiTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = DeiTForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Optional[Any] = DeiTForMaskedImageModeling(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[str] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[int] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Dict = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = DeiTModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowercase_ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=False ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : Optional[int] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCAmelCase_ : Dict = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : List[str] = model_class(lowercase_ )
model.gradient_checkpointing_enable()
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCAmelCase_ : Any = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_ ),
*get_values(lowercase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCAmelCase_ : str = problem_type["title"]
UpperCAmelCase_ : List[Any] = problem_type["num_labels"]
UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : List[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ : Tuple = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_ ) as warning_list:
UpperCAmelCase_ : List[str] = model(**lowercase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __a ( ):
UpperCAmelCase_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
lowercase_ )
UpperCAmelCase_ : List[str] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowercase_ )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase_ : str = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase_ : str = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowercase_ , return_tensors="pt" )
UpperCAmelCase_ : List[str] = inputs.pixel_values.to(lowercase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : int = model(lowercase_ )
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__UpperCAmelCase : Dict = """A painting of a squirrel eating a burger"""
__UpperCAmelCase : Any = jax.device_count()
__UpperCAmelCase : List[str] = num_samples * [prompt]
__UpperCAmelCase : Tuple = sd_pipe.prepare_inputs(UpperCamelCase )
__UpperCAmelCase : int = replicate(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = shard(UpperCamelCase )
__UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
__UpperCAmelCase : Tuple = jax.random.split(UpperCamelCase , jax.device_count() )
__UpperCAmelCase : Tuple = sd_pipe(UpperCamelCase , UpperCamelCase , UpperCamelCase , num_inference_steps=25 , jit=UpperCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__UpperCAmelCase : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCAmelCase : Optional[int] = images[0, 253:256, 253:256, -1]
__UpperCAmelCase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCAmelCase : Optional[int] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Dict = """stabilityai/stable-diffusion-2"""
__UpperCAmelCase : Any = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase , subfolder="""scheduler""" )
__UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
UpperCamelCase , scheduler=UpperCamelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__UpperCAmelCase : List[Any] = scheduler_params
__UpperCAmelCase : Optional[int] = """A painting of a squirrel eating a burger"""
__UpperCAmelCase : Tuple = jax.device_count()
__UpperCAmelCase : Optional[int] = num_samples * [prompt]
__UpperCAmelCase : Optional[int] = sd_pipe.prepare_inputs(UpperCamelCase )
__UpperCAmelCase : List[Any] = replicate(UpperCamelCase )
__UpperCAmelCase : str = shard(UpperCamelCase )
__UpperCAmelCase : str = jax.random.PRNGKey(0 )
__UpperCAmelCase : Tuple = jax.random.split(UpperCamelCase , jax.device_count() )
__UpperCAmelCase : Any = sd_pipe(UpperCamelCase , UpperCamelCase , UpperCamelCase , num_inference_steps=25 , jit=UpperCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__UpperCAmelCase : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCAmelCase : Dict = images[0, 253:256, 253:256, -1]
__UpperCAmelCase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCAmelCase : List[Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
'''simple docstring'''
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
__A = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
__A = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
__A = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], __A, module_spec=__spec__)
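# Usage note (sketch): with the lazy structure above, importing the package is cheap.
# The heavy torch/TF submodules are only loaded on first attribute access, e.g.:
#
#   from transformers.models.data2vec import Data2VecTextModel  # triggers the real import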
| 90
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : List[str] ) -> Any:
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
        tokenizer = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self ) -> XLMRobertaTokenizer:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(__lowerCAmelCase , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def __UpperCAmelCase ( self : int ) -> str:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> int:
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
# fmt: off
a = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 107
| 0
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 1_0_0_0 ) -> int:
'''simple docstring'''
    num = 2**lowerCAmelCase__
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
return sum_of_num
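# Worked example: for power=15, 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26,
# so UpperCAmelCase__(15) returns 26.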
if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = UpperCAmelCase__(power)
    print("""Sum of the digits is: """, result)
| 351
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
    def setUp( self ):
"""simple docstring"""
super().setUp()
# fmt: off
        vocab_tokens = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        emoji_tokens = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , __lowerCAmelCase ):
"""simple docstring"""
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
    def get_clean_sequence( self , __lowerCAmelCase ):
"""simple docstring"""
        input_text , output_text = self.get_input_output_texts(__lowerCAmelCase )
        ids = __lowerCAmelCase.encode(output_text , add_special_tokens=False )
        text = __lowerCAmelCase.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、世界。 こんばんは、㔺界。"""
        expected_token = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
def A__ ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        expected_text = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        ids = tokenizer.encode(input_text )
        output_text = tokenizer.decode(ids )
        self.assertEqual(output_text , expected_text )
@slow
def A__ ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        expected_text = """こんにちは、世界。こんばんは、世界。😀"""
        x_token_1 = tokenizer.encode(prefix_text + input_text )
        x_token_2 = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        x_token_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        decoded_1 = tokenizer.decode(x_token_1 )
        decoded_2 = tokenizer.decode(x_token_2 )
        decoded_3 = tokenizer.decode(x_token_3 )
        self.assertEqual(decoded_1 , expected_text )
        self.assertEqual(decoded_2 , expected_text )
        self.assertEqual(decoded_3 , expected_text )
@slow
def A__ ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_id_2 = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        type_id_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_id_1 , expected_mask_1 )
        self.assertListEqual(type_id_2 , expected_mask_2 )
        self.assertListEqual(type_id_3 , expected_mask_3 )
@slow
def A__ ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        x_token = tokenizer.encode("""あンいワ""" )
        x_token_2 = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        x_token_3 = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(x_token ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token , x_token_2 )
        self.assertNotEqual(x_token , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] )  # SEG token
        self.assertEqual(x_token_2[1] , x_token_3[3] )  # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        input_ids_expected = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , input_ids_expected )
        self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token.attention_mask , attention_mask_expected )
        self.assertListEqual(x_token_2.input_ids , input_ids_expected )
        self.assertListEqual(x_token_2.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token_2.attention_mask , attention_mask_expected )
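        # Note (derived from the expected values above): the shorter first pair is
        # right-padded with two pad ids (3_5999), exactly where its attention_mask
        # drops to 0, while token_type_ids flag the prefix segment of each pair.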
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( __A ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
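# Example: is_prime(739397) is True; 739397 is also the largest truncatable prime,
# i.e. every left and right truncation of it is prime as well.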
def list_truncated_nums( __A ) -> list[int]:
    '''simple docstring'''
    str_num = str(__A )
    list_nums = [__A]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
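# Example: list_truncated_nums(3797) -> [3797, 797, 379, 97, 37, 7, 3],
# the number itself followed by its interleaved right- and left-truncations.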
def validate( __A ) -> bool:
'''simple docstring'''
if len(str(__A ) ) > 3:
if not is_prime(int(str(__A )[-3:] ) ) or not is_prime(int(str(__A )[:3] ) ):
return False
return True
def compute_truncated_primes( __A = 11 ) -> list[int]:
    '''simple docstring'''
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != __A:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
num += 2
return list_truncated_primes
def solution( ) -> int:
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
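# For reference, the eleven truncatable primes are 23, 37, 53, 73, 313, 317, 373,
# 797, 3137, 3797 and 739397, and their sum is 748317.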
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""")
| 80
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term( a_i , k , i , n ):
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff , dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff , dn , _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c , a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            jumps = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff , terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff , terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute( a_i , k , i , n ):
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b , ds_c , diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend , a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add( digits , k , addend ):
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient , digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend , digit = divmod(addend , 10 )
        digits.append(digit )
def solution( n = 10**15 ) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff , terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
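# Cross-check (sketch): the sequence accelerated above is a(1) = 1,
# a(n+1) = a(n) + digitsum(a(n)), i.e. 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...
# A naive reference implementation for small n, handy for validating next_term:
def _brute_force( n ):
    a = 1
    for _ in range(n - 1 ):
        a += sum(int(d ) for d in str(a ) )
    return a  # e.g. _brute_force(6) == 23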
if __name__ == "__main__":
print(F'''{solution() = }''')
| 196
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
    def create_and_check_model( self , config , pixel_values , labels ):
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
for name, module in model.named_modules():
if isinstance(_UpperCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , config ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 371
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path( model_type , use_small=False ) -> str:
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
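# Example: _get_ckpt_path("text", use_small=True) returns
# os.path.join(CACHE_DIR, "text.pt"), since REMOTE_MODEL_PATHS["text_small"]["file_name"] is "text.pt".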
def _download( from_hf_path , file_name ) -> None:
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
__lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
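# Example invocation (sketch; the script filename is an assumption):
#   python convert_suno_to_hf.py text ./bark-text-hf --is_small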
| 107
| 0
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 74
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
"""simple docstring"""
    def __init__( self, parent, batch_size=1_3, image_size=3_2, patch_size=2, num_channels=3, embed_dim=1_6, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.0_2, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=1_0, encoder_stride=8, ) ->Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ) ->str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ) ->SwinvaConfig:
return SwinvaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model( self, config, pixel_values, labels ) ->Dict:
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling( self, config, pixel_values, labels ) ->List[Any]:
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self, config, pixel_values, labels ) ->Any:
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) ->Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) ->Dict:
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=3_7 )
    def test_config( self ) ->int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model( self ) ->str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def test_multi_gpu_data_parallel_forward( self ) ->Union[str, Any]:
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def test_inputs_embeds( self ) ->Union[str, Any]:
pass
    def test_model_common_attributes( self ) ->Union[str, Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )
    def test_forward_signature( self ) ->Optional[int]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_attention_outputs( self ) ->Union[str, Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, config ) )
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths )
            self.assertEqual(len(attentions ), expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, config ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ), expected_num_attentions )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, config ) )
            if hasattr(self.model_tester, 'num_hidden_states_types' ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ), expected_num_attentions )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
    def check_hidden_states_output( self, inputs_dict, config, model_class, image_size ) ->str:
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, config ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ), expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ), expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width ).permute(0, 2, 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output( self ) ->List[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size )
    def test_hidden_states_output_with_padding( self ) ->str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width) )
    def test_for_masked_image_modeling( self ) ->Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ) ->str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) ->Union[str, Any]:
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ) ->List[str]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head( self ):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
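# Note: the expected slice above pins the first three ImageNet logits of the
# 256x256 tiny checkpoint; any conversion or numerical drift beyond atol=1e-4
# makes this integration test fail.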
| 277
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name ):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
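# Sanity-check sketch (not part of the original script): the two supported
# checkpoints use different label spaces, which the branches above encode.
#
#     assert get_deta_config('deta-swin-large').num_labels == 91        # COCO
#     assert get_deta_config('deta-swin-large-o365').num_labels == 366  # Objects365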
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , src , dest ):
    val = dct.pop(src )
    dct[dest] = val
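# Usage sketch: rename_key moves a single tensor to its new name, e.g. the first
# stem mapping collected above:
#
#     rename_key(state_dict,
#                'backbone.0.body.patch_embed.proj.weight',
#                'model.backbone.model.embeddings.patch_embeddings.projection.weight')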
def read_in_swin_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
            state_dict[F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
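# The fused qkv matrix stacks the three projections row-wise, so it has shape
# (3 * dim, dim); rows [0:dim], [dim:2*dim] and [2*dim:3*dim] are the query, key
# and value weights respectively. A toy illustration of the slicing (hypothetical
# shapes, not part of the conversion):
#
#     qkv = torch.randn(3 * 4, 4)
#     q, k, v = qkv[:4, :], qkv[4 : 2 * 4, :], qkv[-4:, :]   # each (4, 4)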
def read_in_decoder_q_k_v(state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[F"""model.decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[F"""model.decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[F"""model.decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
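# prepare_img downloads the standard COCO "two cats" validation image (id 39769)
# that HuggingFace conversion scripts use to sanity-check outputs; e.g.
# prepare_img().size gives the (width, height) of the downloaded picture.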
@torch.no_grad()
def convert_deta_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
    else:
        raise ValueError(F"""Model name {model_name} not supported""" )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # print the original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('transformer.decoder' , 'model.decoder' )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict['model.' + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace('transformer' , 'model' )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='coco_detection' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device ) )
    # verify logits
    print('Logits:' , outputs.logits[0, :3, :3] )
    print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1E-4 )
    print('Everything ok!' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...' )
        model.push_to_hub(F"""jozhang97/{model_name}""" )
        processor.push_to_hub(F"""jozhang97/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
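# Example invocation (a sketch; the script filename is an assumption, the flags
# are the ones registered above):
#
#     python convert_deta_swin_to_pytorch.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub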
| 103
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False}, )
        return config
    def create_and_check_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp( self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('Does not support attention outputs' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('ESMFold only has one output format.' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
    @slow
    def test_inference_protein_folding( self ):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4 ) )
| 103
| 1
|
"""simple docstring"""
from math import factorial
def solution( number : int = 100 ):
    return sum(int(x ) for x in str(factorial(number ) ) )
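# Worked example: factorial(10) = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) returns 27; the default solution(100) sums the digits of 100!.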
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 332
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('''https://huggingface.co''' )
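# Minimal usage sketch outside the test suite (same imports as above): code run
# inside the simulator fails fast instead of hanging when it tries to reach the
# network, which is what the three modes above each verify in a different way.
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         ...  # code under test that must stay offline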
| 104
| 0
|
"""simple docstring"""
def is_automorphic_number(number ):
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
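# Worked examples: 25 * 25 = 625 ends in 25, so is_automorphic_number(25) is True;
# 7 * 7 = 49 and 9 != 7, so is_automorphic_number(7) is False.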
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
|
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines ):
    filtered_lines = []
    for line in lines:
        line = re.sub(r'''#.*''' , '''''' , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    return sha256(full_bytes ).hexdigest()
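# Worked example: comment-only lines are stripped before hashing, so the two calls
# below produce the same digest and a comment-only edit does not invalidate caches:
#
#     _hash_python_lines(['x = 1', '# a comment', 'y = 2']) == _hash_python_lines(['x = 1', 'y = 2'])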
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 74
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )
    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
        comm_check_on_output(result )
        result = model(
            pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
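    # Shape intuition (derived from the defaults above): with min_size = 32 * 4 = 128
    # and max_size = 32 * 6 = 192, masks_queries_logits comes out as
    # (batch_size, num_queries, 32, 48) because of the // 4 encoder compression
    # asserted in comm_check_on_output.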
@require_torch
class MaskFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_maskformer_model( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskformer_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
    def test_model_common_attributes( self ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
    def test_generate_without_input_ids( self ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
    def test_resize_tokens_embeddings( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            """pixel_values""": torch.randn((2, 3, *size) , device=torch_device ),
            """mask_labels""": torch.randn((2, 10, *size) , device=torch_device ),
            """class_labels""": torch.zeros(2 , 10 , device=torch_device ).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config )
        model.to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
    def test_inference_no_head( self ):
        model = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1088) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
    def test_inference_instance_segmentation_head( self ):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1088) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_instance_segmentation_head_resnet_backbone( self ):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors="""pt""" ).to(torch_device )
        inputs_shape = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 800, 1088) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="""pt""" , )
        inputs["""pixel_values"""] = inputs["""pixel_values"""].to(torch_device )
        inputs["""mask_labels"""] = [el.to(torch_device ) for el in inputs["""mask_labels"""]]
        inputs["""class_labels"""] = [el.to(torch_device ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
| 37
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator , batch_size = 16 , model_name = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
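# Design note: padding to max_length on TPU keeps every batch the same shape so the
# XLA graph compiles once, while 'longest' padding wastes less compute elsewhere.
# A call mirroring the defaults above (hypothetical):
#
#     train_dataloader, eval_dataloader = get_dataloaders(accelerator, 16, 'bert-base-cased')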
def training_function ( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''' , '''mrpc''' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
        performance_metric[f"""epoch-{epoch}"""] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
            json.dump(performance_metric , f )
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Tuple = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=SCREAMING_SNAKE_CASE , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=SCREAMING_SNAKE_CASE , )
parser.add_argument(
'''--output_dir''' , type=SCREAMING_SNAKE_CASE , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=SCREAMING_SNAKE_CASE , default=3 , help='''Number of train epochs.''' , )
__UpperCamelCase :List[str] = parser.parse_args()
__UpperCamelCase :Tuple = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
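# A minimal, self-contained sketch of the gather-then-trim bookkeeping used in
# the evaluation loop above: distributed samplers pad the last batch so every
# process sees the same number of samples, and the padded duplicates must be
# dropped before computing metrics. The tensor and sizes below are
# hypothetical, chosen only to illustrate the slicing.
import torch

def trim_padded(predictions, dataset_len, samples_seen):
    # Keep only the entries that correspond to real (non-padded) samples.
    return predictions[: dataset_len - samples_seen]

gathered = torch.arange(8)  # pretend 8 predictions came back from all processes
print(trim_padded(gathered, dataset_len=6, samples_seen=0))  # tensor([0, 1, 2, 3, 4, 5])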
| 43
| 0
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int , b: int ) -> int:
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    """simple docstring"""
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key : numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
        self.check_determinant() # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter : str):
        return self.key_string.index(letter)
    def replace_digits( self , num : int):
        return self.key_string[round(num)]
    def check_determinant( self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det , len(self.key_string)) != 1:
            msg = (
                F'determinant modular {req_l} of encryption key({det}) '
                F'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)
    def process_text( self , text : str):
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt( self , text : str):
        text = self.process_text(text.upper())
        encrypted = ''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt( self , text : str):
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main():
    n = int(input('Enter the order of the encryption key: ' ) )
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print('Would you like to encrypt or decrypt some text? (1 or 2)' )
    option = input('\n1. Encrypt\n2. Decrypt\n' )
    if option == "1":
        text_e = input('What text would you like to encrypt?: ' )
        print('Your encrypted text is:' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ' )
        print('Your decrypted text is:' )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
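# A small worked example of the Hill-cipher arithmetic the class above relies
# on: each plaintext block becomes a vector of indices into the 36-character
# alphabet, is multiplied by the key matrix, and reduced mod 36 (C = K.P mod 36).
# The 2x2 key below is a hypothetical one whose determinant (7) is coprime to
# 36, so the cipher is invertible and therefore decryptable.
import string
import numpy

alphabet = string.ascii_uppercase + string.digits           # 36 symbols
key = numpy.array([[2, 5], [1, 6]])                         # det = 7, gcd(7, 36) == 1
block = numpy.array([alphabet.index(c) for c in "HI"])      # plaintext block -> indices
cipher_indices = key.dot(block) % 36                        # [18, 19]
print("".join(alphabet[i] for i in cipher_indices))         # "ST"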
| 227
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int , b: int ) -> int:
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    """simple docstring"""
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key : numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
        self.check_determinant() # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter : str):
        return self.key_string.index(letter)
    def replace_digits( self , num : int):
        return self.key_string[round(num)]
    def check_determinant( self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det , len(self.key_string)) != 1:
            msg = (
                F'determinant modular {req_l} of encryption key({det}) '
                F'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)
    def process_text( self , text : str):
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt( self , text : str):
        text = self.process_text(text.upper())
        encrypted = ''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt( self , text : str):
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''
        for i in range(0 , len(text) - self.break_key + 1 , self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main():
    n = int(input('Enter the order of the encryption key: ' ) )
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print('Would you like to encrypt or decrypt some text? (1 or 2)' )
    option = input('\n1. Encrypt\n2. Decrypt\n' )
    if option == "1":
        text_e = input('What text would you like to encrypt?: ' )
        print('Your encrypted text is:' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ' )
        print('Your decrypted text is:' )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 227
| 1
|
"""simple docstring"""
def nand_gate ( input_1 ,input_2 ):
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate ( ):
    assert nand_gate(0 ,0 ) == 1
    assert nand_gate(0 ,1 ) == 1
    assert nand_gate(1 ,0 ) == 1
    assert nand_gate(1 ,1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
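# NAND is functionally complete, so the gate above can synthesize every other
# basic gate. A minimal sketch built on nand_gate as defined above (the helper
# names below are my own, not part of the snippet):
def not_gate_from_nand(a):
    return nand_gate(a, a)

def and_gate_from_nand(a, b):
    return nand_gate(nand_gate(a, b), nand_gate(a, b))

def or_gate_from_nand(a, b):
    return nand_gate(nand_gate(a, a), nand_gate(b, b))

assert [and_gate_from_nand(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [or_gate_from_nand(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]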
| 25
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''yolos'''
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        """simple docstring"""
        return 1E-4
    @property
    def default_onnx_opset(self ) -> int:
        """simple docstring"""
        return 12
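# A usage sketch for the configuration above, assuming it corresponds to
# transformers' YolosConfig: overriding a keyword only changes that stored
# attribute, while everything else keeps its default value.
from transformers import YolosConfig

config = YolosConfig(num_detection_tokens=50)   # override one default
print(config.num_detection_tokens, config.hidden_size)  # 50 768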
| 25
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "xlm-roberta"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 215
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowerCamelCase_ : Any = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase_ : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _A ( lowercase ):
"""simple docstring"""
    with open(lowercase , '''rb''' ) as f:
        im = Image.open(f )
    return im.convert('''RGB''' )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "A folder containing the training data."} )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "A folder containing the validation data."} )
__lowerCAmelCase = field(
default=0.1_5, metadata={"help": "Percent to split off of train for validation."} )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
}, )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = field(
default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_SCREAMING_SNAKE_CASE )}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
__lowerCAmelCase = field(
default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
__lowerCAmelCase = field(default=_SCREAMING_SNAKE_CASE, metadata={"help": "Name or path of preprocessor config."} )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
}, )
__lowerCAmelCase = field(
default=_SCREAMING_SNAKE_CASE, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def collate_fn(examples ):
    """simple docstring"""
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def _A ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a =training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
a =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
a ={}
if data_args.train_dir is not None:
a =os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
a =os.path.join(data_args.validation_dir , '''**''' )
a =load_dataset(
'''imagefolder''' , data_files=lowercase , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
a =None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase ) and data_args.train_val_split > 0.0:
a =dataset['''train'''].train_test_split(data_args.train_val_split )
a =split['''train''']
a =split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
a =dataset['''train'''].features['''labels'''].names
a , a ={}, {}
for i, label in enumerate(lowercase ):
a =str(lowercase )
a =label
# Load the accuracy metric from the datasets package
a =evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
a =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase ) , labelaid=lowercase , idalabel=lowercase , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a =AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
a =AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
a =image_processor.size['''shortest_edge''']
else:
a =(image_processor.size['''height'''], image_processor.size['''width'''])
a =Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
a =Compose(
[
RandomResizedCrop(lowercase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
a =Compose(
[
Resize(lowercase ),
CenterCrop(lowercase ),
ToTensor(),
normalize,
] )
def train_transforms(lowercase ):
a =[
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(lowercase ):
a =[_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
a =(
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
a =(
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(lowercase )
    # Initialize our trainer
a =Trainer(
model=lowercase , args=lowercase , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
a =None
if training_args.resume_from_checkpoint is not None:
a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a =last_checkpoint
a =trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a =trainer.evaluate()
trainer.log_metrics('''eval''' , lowercase )
trainer.save_metrics('''eval''' , lowercase )
# Write model card and (optionally) push to hub
a ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
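# A small sketch of what the collate function in the script above produces: it
# stacks per-example pixel tensors into one batch tensor and collects the
# labels. The 3x224x224 shape is hypothetical, chosen only for illustration.
import torch

examples = [
    {"pixel_values": torch.zeros(3, 224, 224), "labels": 0},
    {"pixel_values": torch.ones(3, 224, 224), "labels": 1},
]
batch = {
    "pixel_values": torch.stack([e["pixel_values"] for e in examples]),
    "labels": torch.tensor([e["labels"] for e in examples]),
}
print(batch["pixel_values"].shape, batch["labels"])  # torch.Size([2, 3, 224, 224]) tensor([0, 1])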
| 215
| 1
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()
device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
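# The optimization recipe above combines two CPU-oriented tricks: channels-last
# memory format and bfloat16 autocasting. A self-contained sketch on a toy
# module (no IPEX required); the layer sizes here are arbitrary.
import torch

conv = torch.nn.Conv2d(3, 8, 3).to(memory_format=torch.channels_last)
x = torch.randn(1, 3, 32, 32).to(memory_format=torch.channels_last)
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    y = conv(x)
print(y.dtype)  # torch.bfloat16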
| 250
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase ):
    @property
    def dummy_uncond_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def test_inference( self ):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase ):
    def test_full_inference( self ):
        """simple docstring"""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 250
| 1
|
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
def get_masked_lm_array(lowerCAmelCase ):
_lowerCAmelCase = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_array(lowerCAmelCase ):
_lowerCAmelCase = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_layer_array(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
def get_encoder_attention_layer_array(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_lowerCAmelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = array.reshape(lowerCAmelCase )
if "kernel" in name:
_lowerCAmelCase = array.transpose()
return torch.from_numpy(lowerCAmelCase )
print(f"Loading model based on config from {config_path}..." )
_lowerCAmelCase = BertConfig.from_json_file(lowerCAmelCase )
_lowerCAmelCase = BertForMaskedLM(lowerCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_lowerCAmelCase = model.bert.encoder.layer[layer_index]
# Self-attention
_lowerCAmelCase = layer.attention.self
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
_lowerCAmelCase = layer.attention.output
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
_lowerCAmelCase = get_encoder_attention_layer_array(
lowerCAmelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_attention_layer_norm/gamma""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_attention_layer_norm/beta""" )
# Intermediate
_lowerCAmelCase = layer.intermediate
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_intermediate_dense/kernel""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_intermediate_dense/bias""" )
# Output
_lowerCAmelCase = layer.output
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_dense/kernel""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_dense/bias""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_layer_norm/gamma""" )
_lowerCAmelCase = get_encoder_layer_array(lowerCAmelCase , """_output_layer_norm/beta""" )
# Embeddings
_lowerCAmelCase = get_encoder_array("""_position_embedding_layer/embeddings""" )
_lowerCAmelCase = get_encoder_array("""_type_embedding_layer/embeddings""" )
_lowerCAmelCase = get_encoder_array("""_embedding_norm_layer/gamma""" )
_lowerCAmelCase = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
_lowerCAmelCase = model.cls.predictions.transform
_lowerCAmelCase = get_masked_lm_array("""dense/kernel""" )
_lowerCAmelCase = get_masked_lm_array("""dense/bias""" )
_lowerCAmelCase = get_masked_lm_array("""layer_norm/gamma""" )
_lowerCAmelCase = get_masked_lm_array("""layer_norm/beta""" )
_lowerCAmelCase = get_masked_lm_array("""embedding_table""" )
# Pooling
_lowerCAmelCase = BertPooler(config=lowerCAmelCase )
_lowerCAmelCase = get_encoder_array("""_pooler_layer/kernel""" )
_lowerCAmelCase = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(lowerCAmelCase )
# Integration test - should load without any errors ;)
_lowerCAmelCase = BertForMaskedLM.from_pretrained(lowerCAmelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
A__ : str =argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
A__ : Dict =parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
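# The conversion helpers above transpose every "kernel" array because TF dense
# layers store weights as (in_features, out_features) while torch.nn.Linear
# stores (out_features, in_features). A minimal sketch with made-up sizes:
import numpy as np
import torch

tf_kernel = np.random.rand(768, 3072).astype(np.float32)      # TF layout: (in, out)
linear = torch.nn.Linear(768, 3072)
linear.weight.data = torch.from_numpy(tf_kernel.transpose())  # torch layout: (out, in)
print(linear.weight.shape)  # torch.Size([3072, 768])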
| 220
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
A__ : List[str] =TypeVar('''T''')
class UpperCAmelCase ( Generic[T] ):
def __init__( self : Tuple , __snake_case : bool = True ) -> None:
        self.adj_list = {} # dictionary of lists
        self.directed = __snake_case
def lowercase__ ( self : Union[str, Any] , __snake_case : T , __snake_case : T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
self.adj_list[destination_vertex].append(__snake_case )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(__snake_case )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__snake_case )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : int ) -> str:
return pformat(self.adj_list )
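# A usage sketch for the adjacency-list class above, calling it exactly as
# defined (the generic class is named `UpperCAmelCase` and its edge-adding
# method `lowercase__`; the constructor argument is passed positionally to
# sidestep its name-mangled parameter). An undirected triangle leaves each
# vertex listing its two neighbours.
g = UpperCAmelCase[int](False)  # directed=False
g.lowercase__(1, 2)
g.lowercase__(2, 3)
g.lowercase__(3, 1)
print(g)  # {1: [2, 3], 2: [1, 3], 3: [2, 1]}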
| 220
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''markuplm'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 326
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(R'#.*' , '' , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase_ : List[Any] = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase_ : Dict = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase_ : Optional[int] = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
UpperCAmelCase_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 32
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ =logging.get_logger(__name__)
UpperCamelCase_ ={
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _a ( _lowerCAmelCase ):
UpperCamelCase = '''unispeech'''
def __init__( self : Optional[Any], lowerCAmelCase__ : List[Any]=3_2, lowerCAmelCase__ : List[Any]=7_6_8, lowerCAmelCase__ : List[str]=1_2, lowerCAmelCase__ : List[str]=1_2, lowerCAmelCase__ : Any=3_0_7_2, lowerCAmelCase__ : Any="gelu", lowerCAmelCase__ : Union[str, Any]=0.1, lowerCAmelCase__ : List[Any]=0.1, lowerCAmelCase__ : Optional[int]=0.1, lowerCAmelCase__ : Optional[Any]=0.0, lowerCAmelCase__ : str=0.0, lowerCAmelCase__ : Union[str, Any]=0.1, lowerCAmelCase__ : Optional[int]=0.1, lowerCAmelCase__ : Optional[Any]=0.02, lowerCAmelCase__ : Tuple=1e-5, lowerCAmelCase__ : Tuple="group", lowerCAmelCase__ : List[str]="gelu", lowerCAmelCase__ : List[str]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2), lowerCAmelCase__ : List[Any]=(5, 2, 2, 2, 2, 2, 2), lowerCAmelCase__ : Optional[int]=(1_0, 3, 3, 3, 3, 2, 2), lowerCAmelCase__ : str=False, lowerCAmelCase__ : Optional[Any]=1_2_8, lowerCAmelCase__ : List[Any]=1_6, lowerCAmelCase__ : Union[str, Any]=False, lowerCAmelCase__ : Any=True, lowerCAmelCase__ : Dict=0.05, lowerCAmelCase__ : Any=1_0, lowerCAmelCase__ : Optional[int]=2, lowerCAmelCase__ : Optional[Any]=0.0, lowerCAmelCase__ : Dict=1_0, lowerCAmelCase__ : Dict=0, lowerCAmelCase__ : List[Any]=3_2_0, lowerCAmelCase__ : List[Any]=2, lowerCAmelCase__ : List[str]=0.1, lowerCAmelCase__ : str=1_0_0, lowerCAmelCase__ : int=2_5_6, lowerCAmelCase__ : str=2_5_6, lowerCAmelCase__ : Optional[Any]=0.1, lowerCAmelCase__ : Union[str, Any]="mean", lowerCAmelCase__ : Any=False, lowerCAmelCase__ : List[str]=False, lowerCAmelCase__ : Union[str, Any]=2_5_6, lowerCAmelCase__ : Dict=8_0, lowerCAmelCase__ : Any=0, lowerCAmelCase__ : Dict=1, lowerCAmelCase__ : Optional[Any]=2, lowerCAmelCase__ : Union[str, Any]=0.5, **lowerCAmelCase__ : Any, ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__, pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__ )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : List[Any] = feat_extract_norm
_UpperCamelCase : List[str] = feat_extract_activation
_UpperCamelCase : Optional[int] = list(lowerCAmelCase__ )
_UpperCamelCase : Tuple = list(lowerCAmelCase__ )
_UpperCamelCase : Any = list(lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = conv_bias
_UpperCamelCase : Optional[Any] = num_conv_pos_embeddings
_UpperCamelCase : Dict = num_conv_pos_embedding_groups
_UpperCamelCase : str = len(self.conv_dim )
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : List[Any] = num_attention_heads
_UpperCamelCase : int = hidden_dropout
_UpperCamelCase : Tuple = attention_dropout
_UpperCamelCase : str = activation_dropout
_UpperCamelCase : Optional[int] = feat_proj_dropout
_UpperCamelCase : Optional[Any] = final_dropout
_UpperCamelCase : Optional[int] = layerdrop
_UpperCamelCase : Tuple = layer_norm_eps
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : str = num_ctc_classes
_UpperCamelCase : Tuple = vocab_size
_UpperCamelCase : Tuple = do_stable_layer_norm
_UpperCamelCase : List[Any] = use_weighted_layer_sum
_UpperCamelCase : Tuple = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : Optional[Any] = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : List[str] = mask_time_length
_UpperCamelCase : Tuple = mask_time_min_masks
_UpperCamelCase : List[Any] = mask_feature_prob
_UpperCamelCase : int = mask_feature_length
_UpperCamelCase : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCamelCase : List[Any] = num_codevectors_per_group
_UpperCamelCase : List[str] = num_codevector_groups
_UpperCamelCase : List[Any] = contrastive_logits_temperature
_UpperCamelCase : Union[str, Any] = feat_quantizer_dropout
_UpperCamelCase : Dict = num_negatives
_UpperCamelCase : Tuple = codevector_dim
_UpperCamelCase : str = proj_codevector_dim
_UpperCamelCase : Optional[int] = diversity_loss_weight
# ctc loss
_UpperCamelCase : List[str] = ctc_loss_reduction
_UpperCamelCase : Optional[Any] = ctc_zero_infinity
# pretraining loss
_UpperCamelCase : List[Any] = replace_prob
@property
def snake_case ( self : List[Any] ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1 )
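# The final property above multiplies the convolutional strides together,
# giving the feature extractor's overall downsampling factor. A worked sketch
# with the default strides from the signature, assuming 16 kHz input audio:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
hop = functools.reduce(operator.mul, conv_stride, 1)
print(hop, 16_000 // hop)  # 320 50  (one frame per 320 samples, ~50 frames/s)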
| 128
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _a ( _lowerCAmelCase ):
UpperCamelCase = ['''image_processor''', '''feature_extractor''']
UpperCamelCase = '''TvltImageProcessor'''
UpperCamelCase = '''TvltFeatureExtractor'''
def __init__( self : Union[str, Any], lowerCAmelCase__ : str, lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(image_processor=lowerCAmelCase__, feature_extractor=lowerCAmelCase__ )
_UpperCamelCase : List[str] = image_processor
_UpperCamelCase : int = feature_extractor
def __call__( self : List[str], lowerCAmelCase__ : Optional[int]=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Dict=None, lowerCAmelCase__ : str=None, lowerCAmelCase__ : Optional[int]=False, lowerCAmelCase__ : str=False, *lowerCAmelCase__ : List[str], **lowerCAmelCase__ : Optional[int], ) -> Dict:
'''simple docstring'''
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
_UpperCamelCase : Optional[int] = None
if images is not None:
_UpperCamelCase : Optional[int] = self.image_processor(lowerCAmelCase__, mask_pixel=lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__ )
if images_mixed is not None:
_UpperCamelCase : str = self.image_processor(lowerCAmelCase__, is_mixed=lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__ )
if audio is not None:
_UpperCamelCase : Union[str, Any] = self.feature_extractor(
lowerCAmelCase__, *lowerCAmelCase__, sampling_rate=lowerCAmelCase__, mask_audio=lowerCAmelCase__, **lowerCAmelCase__ )
_UpperCamelCase : str = {}
if audio is not None:
output_dict.update(lowerCAmelCase__ )
if images is not None:
output_dict.update(lowerCAmelCase__ )
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase__ )
return output_dict
@property
def snake_case ( self : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[str] = self.image_processor.model_input_names
_UpperCamelCase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 128
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Union[str, Any] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class __A( SCREAMING_SNAKE_CASE__ ):
snake_case_ = '''encodec'''
def __init__( self , _snake_case=[1.5, 3.0, 6.0, 12.0, 24.0] , _snake_case=24_000 , _snake_case=1 , _snake_case=False , _snake_case=None , _snake_case=None , _snake_case=128 , _snake_case=32 , _snake_case=1 , _snake_case=[8, 5, 4, 2] , _snake_case="weight_norm" , _snake_case=7 , _snake_case=7 , _snake_case=3 , _snake_case=2 , _snake_case=True , _snake_case="reflect" , _snake_case=2 , _snake_case=2 , _snake_case=1.0 , _snake_case=1_024 , _snake_case=None , _snake_case=True , **_snake_case , ) -> int:
'''simple docstring'''
__a = target_bandwidths
__a = sampling_rate
__a = audio_channels
__a = normalize
__a = chunk_length_s
__a = overlap
__a = hidden_size
__a = num_filters
__a = num_residual_layers
__a = upsampling_ratios
__a = norm_type
__a = kernel_size
__a = last_kernel_size
__a = residual_kernel_size
__a = dilation_growth_rate
__a = use_causal_conv
__a = pad_mode
__a = compress
__a = num_lstm_layers
__a = trim_right_ratio
__a = codebook_size
__a = codebook_dim if codebook_dim is not None else hidden_size
__a = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**A__ )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
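# The EnCodec properties above are simple arithmetic on the config values. A
# worked sketch with the defaults from the signature: upsampling ratios
# (8, 5, 4, 2) give a hop length of 320, so 24 kHz audio yields 75 frames/s.
import math
import numpy as np

hop_length = int(np.prod([8, 5, 4, 2]))      # 320
frame_rate = math.ceil(24_000 / hop_length)  # 75
print(hop_length, frame_rate)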
| 6
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : List[Any] =IFInpaintingPipeline
lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''}
def A__ ( self):
return self._get_dummy_components()
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
def A__ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
| 101
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 1_500_000 )-> int:
_lowerCamelCase = defaultdict(snake_case )
_lowerCamelCase = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , snake_case , 2 ):
if gcd(snake_case , snake_case ) > 1:
continue
_lowerCamelCase = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(snake_case , limit + 1 , snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'{solution() = }')
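# Note on the solution above: it enumerates primitive Pythagorean triples via
# Euclid's formula -- for coprime m > n of opposite parity, a = m^2 - n^2,
# b = 2*m*n, c = m^2 + n^2, so the perimeter is a + b + c = 2*m*(m + n).
# The inner loop then counts every multiple of each primitive perimeter, and the
# answer is the number of perimeters that can be formed in exactly one way.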
| 80
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A_ : str =logging.get_logger(__name__)
A_ : int =OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
A_ : Optional[int] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> Any:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_lowerCamelCase = model_type_to_module_name(snake_case )
_lowerCamelCase = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(snake_case , snake_case )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(snake_case , '__name__' , snake_case ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_lowerCamelCase = importlib.import_module('transformers' )
if hasattr(snake_case , snake_case ):
return getattr(snake_case , snake_case )
return None
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, os.PathLike] , snake_case : Optional[Union[str, os.PathLike]] = None , snake_case : bool = False , snake_case : bool = False , snake_case : Optional[Dict[str, str]] = None , snake_case : Optional[Union[bool, str]] = None , snake_case : Optional[str] = None , snake_case : bool = False , **snake_case : List[str] , )-> Optional[int]:
_lowerCamelCase = get_file_from_repo(
snake_case , snake_case , cache_dir=snake_case , force_download=snake_case , resume_download=snake_case , proxies=snake_case , use_auth_token=snake_case , revision=snake_case , local_files_only=snake_case , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(snake_case , encoding='utf-8' ) as reader:
return json.load(snake_case )
class __a :
def __init__( self ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(a__ )
def snake_case_ ( cls , a__ , **a__ ):
_lowerCamelCase = kwargs.pop('config' , a__ )
_lowerCamelCase = kwargs.pop('trust_remote_code' , a__ )
_lowerCamelCase = True
_lowerCamelCase , _lowerCamelCase = ImageProcessingMixin.get_image_processor_dict(a__ , **a__ )
_lowerCamelCase = config_dict.get('image_processor_type' , a__ )
_lowerCamelCase = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
_lowerCamelCase = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_lowerCamelCase = config_dict.pop('feature_extractor_type' , a__ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_lowerCamelCase = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
_lowerCamelCase = config_dict['auto_map']['AutoFeatureExtractor']
_lowerCamelCase = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(a__ , a__ ):
_lowerCamelCase = AutoConfig.from_pretrained(a__ , **a__ )
# It could be in `config.image_processor_type``
_lowerCamelCase = getattr(a__ , 'image_processor_type' , a__ )
if hasattr(a__ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_lowerCamelCase = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_lowerCamelCase = image_processor_class_from_name(a__ )
_lowerCamelCase = image_processor_auto_map is not None
_lowerCamelCase = image_processor_class is not None or type(a__ ) in IMAGE_PROCESSOR_MAPPING
_lowerCamelCase = resolve_trust_remote_code(
a__ , a__ , a__ , a__ )
if has_remote_code and trust_remote_code:
_lowerCamelCase = get_class_from_dynamic_module(
a__ , a__ , **a__ )
_lowerCamelCase = kwargs.pop('code_revision' , a__ )
if os.path.isdir(a__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(a__ , **a__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(a__ , **a__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a__ ) in IMAGE_PROCESSOR_MAPPING:
_lowerCamelCase = IMAGE_PROCESSOR_MAPPING[type(a__ )]
return image_processor_class.from_dict(a__ , **a__ )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def snake_case_ ( a__ , a__ ):
IMAGE_PROCESSOR_MAPPING.register(a__ , a__ )
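# A hedged usage sketch for the Auto class above. Upstream this is
# AutoImageProcessor; the checkpoint name below is an illustrative assumption.
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=image, return_tensors="pt")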
| 80
| 1
|
'''simple docstring'''
def __lowercase ( __lowercase ) -> int:
'''simple docstring'''
    assert isinstance(__lowercase , int ), F'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
_A = F'''The input value of [n={number}] has to be > 0'''
raise ValueError(__lowercase )
else:
_A = sylvester(number - 1 )
_A = num - 1
_A = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
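# A de-obfuscated sketch of the same recurrence for reference -- the function
# above implements s(1) = 2 and s(n) = s(n-1)^2 - s(n-1) + 1 via lower * upper + 1;
# the helper name below is an illustrative assumption.
def _sylvester_sketch(n: int) -> int:
    term = 2
    for _ in range(n - 1):
        term = term * term - term + 1
    return term
assert [_sylvester_sketch(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]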
| 79
|
'''simple docstring'''
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : list[int] ):
'''simple docstring'''
_A = len(__UpperCAmelCase )
_A = [0] * len_array
if len_array > 0:
_A = array[0]
for i in range(1 , __UpperCAmelCase ):
_A = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ):
'''simple docstring'''
_A = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCAmelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
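# Worked example for the prefix-sum class above (upstream this is typically a
# PrefixSum class with get_sum and contains_sum; the names here are generated):
#   array = [1, 2, 3]  ->  prefix_sum = [1, 3, 6]
#   sum over indices 1..2  ->  prefix_sum[2] - prefix_sum[0] = 5
#   contains_sum(5) is True, since the subarray [2, 3] sums to 5.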
| 79
| 1
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ : Optional[int] = logging.get_logger(__name__)
class a :
def __init__( self , __magic_name__ = None , __magic_name__ = None , __magic_name__=None , __magic_name__=None ) -> List[str]:
if not conversation_id:
_a = uuid.uuida()
if past_user_inputs is None:
_a = []
if generated_responses is None:
_a = []
_a = conversation_id
_a = past_user_inputs
_a = generated_responses
_a = text
def __eq__( self , __magic_name__ ) -> int:
if not isinstance(__magic_name__ , __magic_name__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = False ) -> Tuple:
if self.new_user_input:
if overwrite:
logger.warning(
                    f'User input added while unprocessed input still existed: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
_a = text
else:
logger.warning(
                    f'User input added while unprocessed input still existed: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
_a = text
def __UpperCAmelCase ( self ) -> int:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_a = None
def __UpperCAmelCase ( self , __magic_name__ ) -> str:
self.generated_responses.append(__magic_name__ )
def __UpperCAmelCase ( self ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Dict:
_a = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_a = 'user' if is_user else 'bot'
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
_SCREAMING_SNAKE_CASE , r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , *__magic_name__ , **__magic_name__ ) -> Any:
super().__init__(*__magic_name__ , **__magic_name__ )
if self.tokenizer.pad_token_id is None:
_a = self.tokenizer.eos_token
def __UpperCAmelCase ( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> List[Any]:
_a = {}
_a = {}
_a = {}
if min_length_for_response is not None:
_a = min_length_for_response
if minimum_tokens is not None:
_a = minimum_tokens
if "max_length" in generate_kwargs:
_a = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_a = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__magic_name__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __magic_name__ , __magic_name__=0 , **__magic_name__ ) -> int:
_a = super().__call__(__magic_name__ , num_workers=__magic_name__ , **__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=32 ) -> Dict[str, Any]:
if not isinstance(__magic_name__ , __magic_name__ ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
_a = self.tokenizer._build_conversation_input_ids(__magic_name__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_a = self._legacy_parse_and_tokenize(__magic_name__ )
if self.framework == "pt":
_a = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_a = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=10 , **__magic_name__ ) -> List[str]:
_a = generate_kwargs.get('max_length' , self.model.config.max_length )
_a = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
_a = max_length - minimum_tokens
_a = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
_a = model_inputs['attention_mask'][:, -trim:]
_a = model_inputs.pop('conversation' )
_a = max_length
_a = self.model.generate(**__magic_name__ , **__magic_name__ )
if self.model.config.is_encoder_decoder:
_a = 1
else:
_a = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=True ) -> Optional[int]:
_a = model_outputs['output_ids']
_a = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
_a = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(__magic_name__ )
return conversation
def __UpperCAmelCase ( self , __magic_name__ ) -> Dict:
_a = self.tokenizer.eos_token_id
_a = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) )
if len(__magic_name__ ) > self.tokenizer.model_max_length:
_a = input_ids[-self.tokenizer.model_max_length :]
return input_ids
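# A hedged usage sketch for the pipeline above. Upstream this class is
# ConversationalPipeline, reached through pipeline("conversational"); the model
# checkpoint below is an illustrative assumption.
#
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("What is the best movie ever made?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])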
| 104
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ : Optional[int] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["DeiTFeatureExtractor"]
a_ : List[Any] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
| 1
|
def a ( snake_case__: float , snake_case__: int ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(snake_case__ ) , snake_case__ )
return number - int(snake_case__ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 30
|
import requests
snake_case__ : int = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def _a ( lowerCamelCase: str ) -> None:
'''simple docstring'''
__A = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 117
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'torchsde']
def __init__(self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def _a (cls , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def _a (cls , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """torchsde"""] )
| 352
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ : Tuple = pad_token_id
UpperCAmelCase__ : Any = max_length
UpperCAmelCase__ : str = vocab
UpperCAmelCase__ : Union[str, Any] = merges
UpperCAmelCase__ : Tuple = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
UpperCAmelCase__ : Tuple = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase ):
"""simple docstring"""
return cls(**_lowerCamelCase )
def _a (self ):
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.tf_tokenizer(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
UpperCAmelCase__ : Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
UpperCAmelCase__ , UpperCAmelCase__ : str = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
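# A hedged usage sketch: upstream this layer is TFGPT2Tokenizer, an in-graph
# wrapper around a regular GPT-2 tokenizer; the checkpoint name below is an
# illustrative assumption.
#
#   import tensorflow as tf
#   from transformers import TFGPT2Tokenizer
#   tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tf_tokenizer(tf.constant(["hello world"]))
#   print(batch["input_ids"], batch["attention_mask"])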
| 166
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , A , A , A , A , A , A , A , ) -> str:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def snake_case_( self , A = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def snake_case_( self ) -> List[Any]:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[str]:
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = 1
elif isinstance(A , A ):
_SCREAMING_SNAKE_CASE = len(A )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A )}.' )
# get prompt text embeddings
_SCREAMING_SNAKE_CASE = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = text_embeddings.shape
_SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , A , 1 )
_SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
_SCREAMING_SNAKE_CASE = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='
f' {type(A )}.' )
elif isinstance(A , A ):
_SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
_SCREAMING_SNAKE_CASE = negative_prompt
_SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
_SCREAMING_SNAKE_CASE = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
_SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
_SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(A , A , 1 )
_SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_SCREAMING_SNAKE_CASE = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
_SCREAMING_SNAKE_CASE = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
_SCREAMING_SNAKE_CASE = torch.randn(
A , generator=A , device=self.device , dtype=A )
_SCREAMING_SNAKE_CASE = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_SCREAMING_SNAKE_CASE = latents_reference.to(self.device )
_SCREAMING_SNAKE_CASE = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_SCREAMING_SNAKE_CASE = (latents_shape[3] - latents_shape_reference[3]) // 2
_SCREAMING_SNAKE_CASE = (latents_shape[2] - latents_shape_reference[2]) // 2
_SCREAMING_SNAKE_CASE = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_SCREAMING_SNAKE_CASE = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_SCREAMING_SNAKE_CASE = 0 if dx < 0 else dx
_SCREAMING_SNAKE_CASE = 0 if dy < 0 else dy
_SCREAMING_SNAKE_CASE = max(-dx , 0 )
_SCREAMING_SNAKE_CASE = max(-dy , 0 )
# import pdb
# pdb.set_trace()
_SCREAMING_SNAKE_CASE = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_SCREAMING_SNAKE_CASE = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_SCREAMING_SNAKE_CASE = {}
if accepts_eta:
_SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_SCREAMING_SNAKE_CASE = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
_SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
_SCREAMING_SNAKE_CASE = 1 / 0.1_8215 * latents
_SCREAMING_SNAKE_CASE = self.vae.decode(A ).sample
_SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_SCREAMING_SNAKE_CASE = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_SCREAMING_SNAKE_CASE = None
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 58
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCAmelCase_ = 192
lowerCAmelCase_ = 768
lowerCAmelCase_ = 12
lowerCAmelCase_ = 3
lowerCAmelCase_ = [800, 1333]
lowerCAmelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase_ = 330
lowerCAmelCase_ = 14
lowerCAmelCase_ = 6
lowerCAmelCase_ = 1320
elif "yolos_s" in yolos_name:
lowerCAmelCase_ = 384
lowerCAmelCase_ = 1536
lowerCAmelCase_ = 12
lowerCAmelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCAmelCase_ = [800, 1344]
lowerCAmelCase_ = 91
lowerCAmelCase_ = '''huggingface/label-files'''
lowerCAmelCase_ = '''coco-detection-id2label.json'''
lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase ( _A , _A , _A = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase_ = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCAmelCase_ = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( _A ):
if "backbone" in name:
lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
lowerCAmelCase_ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def __UpperCamelCase ( _A , _A ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(_A )
if "qkv" in key:
lowerCAmelCase_ = key.split('''.''' )
lowerCAmelCase_ = int(key_split[2] )
lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = val
return orig_state_dict
def __UpperCamelCase ( ):
lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A , _A = False ):
lowerCAmelCase_ = get_yolos_config(_A )
# load original state_dict
lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model''']
# load 🤗 model
lowerCAmelCase_ = YolosForObjectDetection(_A )
model.eval()
lowerCAmelCase_ = convert_state_dict(_A , _A )
model.load_state_dict(_A )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512
lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A )
lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase_ = model(**_A )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes
lowerCAmelCase_ , lowerCAmelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCAmelCase_ = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCAmelCase_ = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCAmelCase_ = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCAmelCase_ = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCAmelCase_ = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCAmelCase_ = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase_ = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCAmelCase_ = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCAmelCase_ = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCAmelCase_ = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1E-4 )
Path(_A ).mkdir(exist_ok=_A )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_A )
if push_to_hub:
lowerCAmelCase_ = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
lowerCAmelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(_A , organization='''hustvl''' )
model.push_to_hub(_A , organization='''hustvl''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
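# Example invocation using only the flags defined above (the script file name
# and the paths are placeholders):
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/output \
#       --push_to_hub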
| 278
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _A (lowerCAmelCase__ :list[list[float]] ) -> list[list[float]]:
'''simple docstring'''
_a = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCAmelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_a = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
_a = [[0.0, 0.0], [0.0, 0.0]]
_a , _a = matrix[1][1], matrix[0][0]
_a , _a = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCAmelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCAmelCase__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
        # Calculate the determinant of the matrix using Sarrus' rule
_a = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
_a = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_a = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_a = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_a = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_a = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_a = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_a = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_a = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_a = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_a = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_a = array(lowerCAmelCase__ )
for i in range(3 ):
for j in range(3 ):
_a = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_a = array(lowerCAmelCase__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowerCAmelCase__ )
# Calculate the inverse of the matrix
return [[float(d(lowerCAmelCase__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
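# Quick numeric check of the 2x2 branch above: for [[4.0, 7.0], [2.0, 6.0]] the
# determinant is 4*6 - 2*7 = 10, so the inverse is [[0.6, -0.7], [-0.2, 0.4]]
# (swap the diagonal, negate the off-diagonal, divide everything by the determinant).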
| 104
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self ) -> int:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Dict:
_a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> str:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Any:
_a = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> int:
# pass variant but use the non-variant filenames
_a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[str]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
# pass variant but use the non-variant filenames
_a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
| 104
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = FunnelConfig.from_json_file(lowerCAmelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_UpperCAmelCase : Optional[Any] = FunnelBaseModel(lowerCAmelCase_ ) if base_model else FunnelModel(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCAmelCase_ )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
A_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
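# Example invocation with the required flags above (the script file name and the
# paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin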
| 215
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : str = """laion/clap-htsat-unfused"""
_UpperCAmelCase : int = tempfile.mkdtemp()
def _snake_case ( self ,**a_ ) -> str:
return RobertaTokenizer.from_pretrained(self.checkpoint ,**a_ )
def _snake_case ( self ,**a_ ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**a_ )
def _snake_case ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Any = self.get_feature_extractor()
_UpperCAmelCase : int = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,a_ )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,a_ )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : int = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_UpperCAmelCase : List[Any] = self.get_feature_extractor(do_normalize=a_ ,padding_value=1.0 )
_UpperCAmelCase : Optional[Any] = ClapProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=a_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,a_ )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,a_ )
def _snake_case ( self ) -> str:
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : str = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ )
_UpperCAmelCase : Tuple = floats_list((3, 1_000) )
_UpperCAmelCase : int = feature_extractor(a_ ,return_tensors="""np""" )
_UpperCAmelCase : Union[str, Any] = processor(audios=a_ ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _snake_case ( self ) -> Tuple:
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ )
_UpperCAmelCase : Union[str, Any] = """This is a test string"""
_UpperCAmelCase : Optional[Any] = processor(text=a_ )
_UpperCAmelCase : Any = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Any = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ )
_UpperCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : Dict = processor.batch_decode(a_ )
_UpperCAmelCase : Any = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ ,a_ )
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : List[str] = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ )
self.assertListEqual(
processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """
    Return the 0-based index of the lowest set bit of a non-negative integer.

    >>> lowest_set_bit_index(12)  # 0b1100
    2
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if number == 0 else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
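
# A minimal sanity check for the identity used above (illustrative only):
# -n is the two's complement of n, so only the lowest set bit survives n & -n,
# and its 0-based position equals bit_length() - 1 of the isolated bit.
for n in (1, 2, 12, 22, 1024):
    assert (n & -n).bit_length() - 1 == lowest_set_bit_index(n)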
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """
    Takes input text, generates output, and then using reference calculates the BLEU scores.

    The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
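
# Minimal FiLM shape sketch (the sizes below are illustrative assumptions; 768
# only happens to match the d_model default above). The conditioning embedding
# is projected to per-feature (scale, shift) pairs and applied as
# x * (1 + scale) + shift, exactly as in T5FiLMLayer.
if __name__ == "__main__":
    film = T5FiLMLayer(in_features=3072, out_features=768)
    x = torch.randn(2, 16, 768)      # (batch, seq, d_model)
    cond = torch.randn(2, 1, 3072)   # (batch, 1, d_model * 4)
    assert film(x, cond).shape == x.shape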
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class snake_case ( a__ ):
__magic_name__ : Dict = ['vqvae']
def __init__( self : Optional[int] , A : List[str] , A : Optional[Any] , A : List[Any] , A : str , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase , mel=_lowerCamelCase , vqvae=_lowerCamelCase )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , _lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self : Optional[int] , A : Union[str, Any] = 1 , A : Union[str, Any] = None , A : Optional[Any] = None , A : List[str] = 0 , A : Optional[int] = 0 , A : List[str] = None , A : Union[str, Any] = None , A : int = 0 , A : Dict = 0 , A : Optional[int] = None , A : Tuple = 0 , A : Optional[int] = None , A : Dict = None , A : Dict=True , ):
'''simple docstring'''
a : Union[str, Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCamelCase )
a : List[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
a : List[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowerCamelCase , device=self.device , )
a : List[Any] = noise
a : int = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCamelCase , _lowerCamelCase )
a : Optional[int] = self.mel.audio_slice_to_image(_lowerCamelCase )
a : Dict = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
a : Dict = (input_image / 2_5_5) * 2 - 1
a : str = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(_lowerCamelCase , 0 ) ).latent_dist.sample(
generator=_lowerCamelCase )[0]
a : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Dict = self.scheduler.add_noise(_lowerCamelCase , _lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : Any = int(mask_start_secs * pixels_per_second )
a : int = int(mask_end_secs * pixels_per_second )
a : Dict = self.scheduler.add_noise(_lowerCamelCase , _lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _lowerCamelCase ):
a : Optional[Any] = self.unet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )['''sample''']
else:
a : Optional[int] = self.unet(_lowerCamelCase , _lowerCamelCase )['''sample''']
if isinstance(self.scheduler , _lowerCamelCase ):
a : int = self.scheduler.step(
model_output=_lowerCamelCase , timestep=_lowerCamelCase , sample=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , )['''prev_sample''']
else:
a : int = self.scheduler.step(
model_output=_lowerCamelCase , timestep=_lowerCamelCase , sample=_lowerCamelCase , generator=_lowerCamelCase , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
a : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : Dict = self.vqvae.decode(_lowerCamelCase )['''sample''']
a : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
a : int = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
a : str = (images * 2_5_5).round().astype('uint8' )
a : Union[str, Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_lowerCamelCase , mode='RGB' ).convert('L' ) for _ in images) )
a : Optional[int] = [self.mel.image_to_audio(_lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowerCamelCase ) )
@torch.no_grad()
def lowerCamelCase__ ( self : List[Any] , A : Optional[Any] , A : int = 5_0 ):
'''simple docstring'''
assert isinstance(self.scheduler , _lowerCamelCase )
self.scheduler.set_timesteps(_lowerCamelCase )
a : int = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
a : Union[str, Any] = (sample / 2_5_5) * 2 - 1
a : Any = torch.Tensor(_lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
a : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : Optional[int] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : Union[str, Any] = 1 - alpha_prod_t
a : Union[str, Any] = self.unet(_lowerCamelCase , _lowerCamelCase )['''sample''']
a : Dict = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Tuple = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : str = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCamelCase__ ( A : str , A : int , A : Optional[int] ):
'''simple docstring'''
a : str = acos(torch.dot(torch.flatten(_lowerCamelCase ) , torch.flatten(_lowerCamelCase ) ) / torch.norm(_lowerCamelCase ) / torch.norm(_lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowerCamelCase ) + sin(alpha * theta ) * xa / sin(_lowerCamelCase )
"""simple docstring"""
def snake_case (A_ :str , A_ :bool = False ):
'''simple docstring'''
if not isinstance(A_ , A_ ):
a : Union[str, Any] = f'''Expected string as input, found {type(A_ )}'''
raise ValueError(A_ )
if not isinstance(A_ , A_ ):
a : Optional[int] = f'''Expected boolean as use_pascal parameter, found {type(A_ )}'''
raise ValueError(A_ )
a : Tuple = input_str.split('_' )
a : Dict = 0 if use_pascal else 1
a : int = words[start_index:]
a : int = [word[0].upper() + word[1:] for word in words_to_capitalize]
a : List[str] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
def check_bouncy(n: int) -> bool:
    """Return True if n is a bouncy number, i.e. neither increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
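
# A quick hand-checkable illustration of the definition above: 538 falls then
# rises, so it is bouncy; 1349 only rises and 987 only falls, so neither is.
assert check_bouncy(538)
assert not check_bouncy(1349)
assert not check_bouncy(987)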
"""
Count the number of ways a given integer can be expressed as the sum of the
Nth powers of unique natural numbers.
"""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solution(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
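
# Hand-verifiable illustration (an assumed example, easy to check by listing
# subsets): 100 has exactly three representations as a sum of distinct squares,
# namely 10^2, 6^2 + 8^2, and 1^2 + 3^2 + 4^2 + 5^2 + 7^2.
assert solution(100, 2) == 3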
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({} )
_lowerCamelCase = '''text'''
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def _A ( _a : Callable[[int | float], int | float] , _a : int | float , _a : int | float , _a : int = 1_0_0 , ):
"""simple docstring"""
A = x_start
A = fnc(_a )
A = 0.0
for _ in range(_a ):
# Approximates curve as a sequence of linear lines and sums their length
A = (x_end - x_start) / steps + xa
A = fnc(_a )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
A = xa
A = fxa
return length
if __name__ == "__main__":
def _A ( _a : Tuple ):
"""simple docstring"""
return math.sin(1_0 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
UpperCAmelCase =10
while i <= 100_000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
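
# Sanity check under a simple assumption: for the straight line f(x) = x on
# [0, 1] the exact arc length is sqrt(2), and the piecewise-linear sum above
# reproduces it for any number of steps.
if __name__ == "__main__":
    assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9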
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_a = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = """albert"""
def __init__( self , lowercase_=3_0000 , lowercase_=128 , lowercase_=4096 , lowercase_=12 , lowercase_=1 , lowercase_=64 , lowercase_=1_6384 , lowercase_=1 , lowercase_="gelu_new" , lowercase_=0 , lowercase_=0 , lowercase_=512 , lowercase_=2 , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=0.1 , lowercase_="absolute" , lowercase_=0 , lowercase_=2 , lowercase_=3 , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[int] = embedding_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_hidden_groups
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Any = inner_group_num
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Any = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : List[Any] = classifier_dropout_prob
UpperCAmelCase_ : Tuple = position_embedding_type
class A_ (lowercase__ ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase_ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they violate `direction`."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into one sorted in `direction`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] in `direction` (1 = ascending, 0 = descending)."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Apply the rules of the game to every point and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |   8  |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
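

# Quick illustrative helper (standalone sketch, not part of the conversion
# script): mirrors the first-component renaming rule applied above. The sample
# keys in the doctest are invented for the demo.
def _demo_rename(param_key: str, mapping: dict) -> str:
    """
    >>> _demo_rename("time_steps.weight", {"time_steps": "time_proj"})
    'time_proj.weight'
    >>> _demo_rename("conv_in.weight", {"time_steps": "time_proj"})
    'conv_in.weight'
    """
    # only the first dotted component is renamed, mirroring the loop above
    head, *rest = param_key.split(".")
    return ".".join([mapping.get(head, head), *rest])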
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        # note: the Flax classes live in the Flax module, not the TF one
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase = None ) -> list[list[str]]:
'''simple docstring'''
lowercase : str = word_bank or []
# create a table
lowercase : int = len(_UpperCAmelCase ) + 1
lowercase : list[list[list[str]]] = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowercase : int = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowercase : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel encoding method: the encoded latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=3, sample_size=32, num_vq_embeddings=256, norm_num_groups=32, vq_embed_dim=None, scaling_factor=0.18215, norm_type="group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> "VQEncoderOutput":
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
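

# Illustrative round-trip sketch (not part of the original module): encode and
# decode a random batch through a tiny VQModel. All sizes below are arbitrary
# demo values, not recommended settings.
if __name__ == "__main__":
    vq = VQModel(block_out_channels=(32,), num_vq_embeddings=64)
    x = torch.randn(1, 3, 32, 32)
    latents = vq.encode(x).latents  # (1, 3, 32, 32): latent_channels defaults to 3
    recon = vq.decode(latents).sample  # quantized, then decoded back to (1, 3, 32, 32)
    print(latents.shape, recon.shape)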
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
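

# Illustrative usage sketch (not part of the original module): instantiating the
# frozen template with a custom text column and reading back the mapping. The
# column name "content" is invented for the demo.
if __name__ == "__main__":
    task = LanguageModeling(text_column="content")
    assert task.column_mapping == {"content": "text"}
    assert task.task == "language-modeling"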
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
# ===== initialization =====
lowerCAmelCase__ : List[str] = Mock()
lowerCAmelCase__ : Optional[int] = conn, Mock()
lowerCAmelCase__ : Union[str, Any] = iter([1, None] )
lowerCAmelCase__ : List[str] = lambda __UpperCAmelCase : next(snake_case_ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=snake_case_ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_snake_case = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_snake_case = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
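

# Quick sanity sketch (not part of the conversion script): what the renaming
# rules above produce for a few representative ParlAI-style keys. The sample
# keys are invented for the demo, chosen to exercise PATTERNS and the norm renames.
def _demo_rename_keys():
    assert rename_state_dict_key("embeddings.weight") == "shared.weight"
    assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == "encoder.layers.0.self_attn.q_proj.weight"
    assert rename_state_dict_key("decoder.layers.0.norm2.bias") == "decoder.layers.0.encoder_attn_layer_norm.bias"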
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_score_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
snake_case_ : Union[str, Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
snake_case_ : int = tensor
return padded_tensor
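
# A minimal wiring sketch for the trainer above (illustrative, not from the
# original file): the model name, dataset object, and argument values are
# assumptions, and `training_args` must expose the fields referenced above
# (label_smoothing, adafactor, lr_scheduler, sortish_sampler, ...), which in
# the original example suite come from a dedicated Seq2SeqTrainingArguments
# dataclass rather than the stock TrainingArguments.
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")  # hypothetical choice
#     trainer = Seq2SeqTrainer(
#         model=model,
#         args=training_args,           # assumed Seq2SeqTrainingArguments instance
#         train_dataset=train_dataset,  # assumed tokenized seq2seq dataset
#         data_args=data_args,          # assumed data-args dataclass with eval_beams etc.
#     )
#     trainer.train()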
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Checks that a model can be converted to BetterTransformer and back."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must raise while in BetterTransformer mode and work once reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
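
# A quick end-to-end sketch of the iterative branch above, run directly against
# jiwer (assumes a jiwer version that still exposes `compute_measures`); the
# sentences are the ones from the docstring example and yield a WER of 0.5:
#
#     incorrect = total = 0
#     for prediction, reference in zip(
#         ["this is the prediction", "there is an other sample"],
#         ["this is the reference", "there is another one"],
#     ):
#         measures = compute_measures(reference, prediction)
#         incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
#         total += measures["substitutions"] + measures["deletions"] + measures["hits"]
#     print(incorrect / total)  # 0.5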
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_words = snake_case  # rebind before `snake_case` is reused for the boxes below
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        expected_boxes = snake_case
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
"""simple docstring"""
def solution(n: int = 10) -> str:
    """
    Returns the last `n` digits of 28433 * 2**7830457 + 1.
    >>> solution()
    '8739992577'
    >>> solution(8)
    '39992577'
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    # three-argument pow does modular exponentiation, so the multi-million-digit
    # power of two is never built in full
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
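
# Why the three-argument pow() above is safe: reducing mod 10**n after every
# multiplication yields the same residue as reducing the full power once, so
# the ~2.36-million-digit value of 2**7830457 is never materialized. A small
# self-check with a cheap exponent:
#     pow(2, 100, 10**5) == (2**100) % 10**5   # True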
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at board[row][column],
    checking the row, the column and both upper diagonals.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack: undo the placement before trying the next column
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
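
# The search above restores the board (board[row][i] = 0) after each recursive
# call, so a single board is reused across the whole backtracking tree.
# Well-known solution counts give a quick sanity check: n=4 -> 2, n=5 -> 10,
# n=6 -> 4, n=8 -> 92, so the final line should report 92 solutions.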
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
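
# Worked example: for the input "5 6 9 * +" the loop pushes 5, 6 and 9, then
# pops 9 and 6 and pushes 6*9 = 54, then pops 54 and 5 and pushes 5+54 = 59,
# so solve() returns 59.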
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """
    Copy/paste/tweak the original model's weights into our SwiftFormer structure.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculate whichever one of voltage, current or power is given as 0,
    from the other two values.
    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    >>> electric_power(voltage=-2, current=3, power=0)
    result(name='power', value=6.0)
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that optionally resizes (shortest edge),
    center-crops, rescales and normalizes images.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """
        Converts the raw model output into per-pixel semantic segmentation maps,
        optionally resized to `target_sizes`.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
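
# A minimal usage sketch for the processor above (comment-only, since this
# module relies on package-relative imports); the synthetic input image is an
# illustrative assumption:
#
#     import numpy as np
#
#     processor = MobileNetV2ImageProcessor()
#     image = (np.random.rand(300, 400, 3) * 255).astype("uint8")
#     batch = processor(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized to 256 on
#                                         # the short side, then center-cropped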
import os
def solution():
    """
    Finds the maximum total of a top-to-bottom path through the triangle
    stored in triangle.txt next to this script.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
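
# The accumulation above is a top-down dynamic program: after the loops, each
# a[i][j] holds the best path sum that can reach that cell. On the small
# triangle from the problem statement, [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]],
# the rows become [3], [10, 7], [12, 14, 13], [20, 19, 23, 16], so the answer
# is max(last row) = 23.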
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list
    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    >>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
    500
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
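
# The recurrence carried by the loop above, evaluated simultaneously via tuple
# assignment:
#     new_max_including = max_excluding + num                (take num; the previous one was skipped)
#     new_max_excluding = max(max_including, max_excluding)  (skip num)
# which is what makes the single-pass O(n) time / O(1) space solution work.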
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 1
|
from __future__ import annotations
from random import random
class Node:
    """
    Treap's node: a binary search tree by value and a heap by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the tree into one tree with values < value and one with values >= value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two trees, assuming every value in `left` is smaller than every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting around it and merging the new node in."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with `value` by splitting them out and merging the rest."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Recursive in-order print of the tree."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """
    Commands: "+value" adds value into the treap, "-value" erases all nodes
    with that value; anything else prints an error.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
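
# Usage sketch: build a treap and print it in sorted order. The random
# priorities keep the tree balanced in expectation, so split/merge (and hence
# insert/erase) run in O(log n) expected time.
#
#     root = None
#     for value in (5, 3, 8, 1):
#         root = insert(root, value)
#     inorder(root)  # prints 1,3,5,8,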
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Defer all heavy imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        # repeated squaring quickly produces a starting value >= a
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """
    Square root approximated using Newton's method.
    https://en.wikipedia.org/wiki/Newton%27s_method
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
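
# The update inside the loop above is Newton's step for f(x) = x**2 - a:
#     x_next = x - (x**2 - a) / (2 * x) = (x + a / x) / 2
# For a = 4 the initial point is get_initial_point(4) = 16, and the iterates
# 16 -> 8.125 -> 4.30865... -> 2.61851... -> 2.07305... -> 2.00129... -> 2.0000004...
# converge quadratically to 2.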
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to `max_images` Google Images results for `query`; return the count."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }

    html = requests.get('https://www.google.com/search', params=params, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script'))))

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",',
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]',
        '',
        str(matched_google_image_data),
    )

    matched_images_full_resolution = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]',
        removed_matched_google_images_thumbnails,
    )

    for index, fixed_full_res_image in enumerate(matched_images_full_resolution):
        if index >= max_images:
            return index
        # the URLs are double-escaped in the page source; decode twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape')
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}

    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self : List[str] ):
pass
    @unittest.skip("TimmBackbone models don't have inputs_embeds")
def __lowerCAmelCase ( self : Dict ):
pass
    @unittest.skip("TimmBackbone models don't have inputs_embeds")
def __lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
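
# Pure-list sketch (not part of the original file) of the segment layout the two
# helpers above produce for a BERT-style pair: [CLS] A [SEP] B [SEP], with
# token_type_ids 0 over the first segment and 1 over the second.
#
#     ids_a, ids_b = [5, 6], [7]
#     input_ids      -> [cls] + ids_a + [sep] + ids_b + [sep]
#     token_type_ids -> [0, 0, 0, 0]          + [1, 1]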
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock())


@pytest.mark.parametrize(
    'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='https://huggingface.co/docs/evaluate'):
        func(*args)
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data: rescale every value into the [0, 1] interval
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data: zero mean and unit standard deviation
    return [round((x - mu) / (sigma), ndigits) for x in data]
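
# Usage sketch (not part of the original file); the first result is easy to
# verify by hand, since normalization rescales into [0, 1].
if __name__ == "__main__":
    print(normalization([2, 7, 10, 20, 30, 50]))    # [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
    print(standardization([2, 7, 10, 20, 30, 50]))  # mean ~0, stdev ~1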
def find_min(arr: list) -> int:
    """Split `arr` into two subsets whose sums differ as little as possible."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
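
# Usage sketch (not part of the original file): {1, 5, 6} vs {11} sums to
# 12 vs 11, so the minimum achievable difference for this input is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1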
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            F'Node: key: {self.key}, val: {self.val}, '
            F'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for the LRU cache below."""

    def __init__(self):
        # sentinel head and rear nodes delimit the list
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add a node to the end of the list (just before the rear sentinel)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink a node from the list; return it, or None if it was not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache that stores up to `capacity` key/value pairs."""

    # class variable mapping each decorated function to its cache instance
    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            F'CacheInfo(hits={self.hits}, misses={self.miss}, '
            F'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for `key` and bump it to most-recently-used."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update a key/value pair, evicting the oldest entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
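
# Decorator usage sketch (not part of the original file), mirroring the classic
# memoized-Fibonacci pattern; the injected `cache_info` hook exposes hit/miss stats.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


if __name__ == "__main__":
    print(fib(30))           # 832040
    print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, ...)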
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
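
# Instantiation sketch (not part of the original file): override a couple of
# fields while inheriting the defaults above for everything else.
if __name__ == "__main__":
    config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    print(config.hidden_size, config.vocab_size)  # 128 29056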
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Invert each pixel of an image (read via cv2.imread) in place."""
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread('image_data/lena.jpg', 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
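
# Worked example (not part of the original file): 13195 = 5 * 7 * 13 * 29,
# so the largest prime factor is 29; the default Project Euler input yields 6857.
assert solution(13195) == 29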
'''simple docstring'''
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1 (Project Euler problem 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # locate the label index whose name starts with "entail"
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [F'This example is {label}' for label in labels],
            return_tensors='pt',
            padding='max_length',
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
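
# Hedged usage sketch (not part of the original file): `PipelineTool.__call__`
# chains encode -> forward -> decode, so after setup the tool can be invoked
# directly; note this downloads the BART-MNLI checkpoint on first use.
#
#     classifier = TextClassificationTool()
#     classifier("This movie was amazing!", labels=["positive", "negative"])  # -> "positive"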
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate around the left child (the original prints 'left rotation node')."""
    print('left rotation node:', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Rotate around the right child (the original prints 'right rotation node')."""
    print('right rotation node:', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data')
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # rebalance after the deletion
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print('insert:' + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print('delete:' + str(data))
        if self.root is None:
            print('Tree is empty!')
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A,"image_mean" ) )
self.assertTrue(hasattr(_A,"image_std" ) )
self.assertTrue(hasattr(_A,"do_normalize" ) )
self.assertTrue(hasattr(_A,"do_resize" ) )
self.assertTrue(hasattr(_A,"do_rescale" ) )
self.assertTrue(hasattr(_A,"do_pad" ) )
self.assertTrue(hasattr(_A,"size" ) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad,_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(_A,batched=_A )
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(_A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Any = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Any = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(_A,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(_A,batched=_A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=_A,torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape,(1, self.image_processor_tester.num_channels, expected_height, expected_width),)
# Test batched
SCREAMING_SNAKE_CASE_ : int = image_processing(_A,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(_A,batched=_A )
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
),)
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt","r" ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : Dict = image_processing(images=_A,annotations=_A,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3],_A,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : int = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"],_A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape,_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0],_A,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"],_A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"],_A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"],_A ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"],_A ) )
# verify size
SCREAMING_SNAKE_CASE_ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"],_A ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt","r" ) as f:
SCREAMING_SNAKE_CASE_ : str = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE_ : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE_ : Any = image_processing(images=_A,annotations=_A,masks_path=_A,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3],_A,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"],_A ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape,_A )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0],_A,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"],_A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"],_A ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"],_A ) )
# verify masks
SCREAMING_SNAKE_CASE_ : str = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(),_A )
# verify orig_size
SCREAMING_SNAKE_CASE_ : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"],_A ) )
# verify size
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"],_A ) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MT5Tokenizer, 'MT5TokenizerFast': MT5TokenizerFast},
        module_spec=__spec__,
    )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub info using the requests module."""
    headers = {
        'Authorization': f'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("""T""")
class DisjointSetTreeNode(Generic[T]):
    def __init__(self , data):
        '''simple docstring'''
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    def __init__(self):
        '''simple docstring'''
        # map from node name to the node object
        self.map = {}
    def make_set(self , data):
        '''simple docstring'''
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)
    def find_set(self , data):
        '''simple docstring'''
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent
    def link(self , node_a , node_b):
        '''simple docstring'''
        # helper function for union operation (union by rank)
        if node_a.rank > node_b.rank:
            node_b.parent = node_a
        else:
            node_a.parent = node_b
            if node_a.rank == node_b.rank:
                node_b.rank += 1
    def union(self , data_a , data_b):
        '''simple docstring'''
        # merge 2 disjoint sets
        self.link(self.find_set(data_a) , self.find_set(data_b))
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self):
        '''simple docstring'''
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}
    def add_node(self , node):
        '''simple docstring'''
        # add a node ONLY if it's not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self , node_a , node_b , weight):
        '''simple docstring'''
        # add an edge with the given weight
        self.add_node(node_a)
        self.add_node(node_b)
        self.connections[node_a][node_b] = weight
        self.connections[node_b][node_a] = weight
    def kruskal(self):
        '''simple docstring'''
        # collect each undirected edge exactly once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
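# Minimal usage sketch (illustrative, not part of the original module):
# g = GraphUndirectedWeighted[int]()
# g.add_edge(1, 2, 1); g.add_edge(2, 3, 2); g.add_edge(1, 3, 10)
# mst = g.kruskal()  # keeps the two cheapest edges; total MST weight is 3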
| 330
| 0
|
"""simple docstring"""
ROMAN = [
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def roman_to_int( roman ) -> int:
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00}
    total = 0
    place = 0
    while place < len(roman ):
        # a smaller numeral before a larger one is subtractive (e.g. IV == 4)
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman( number ) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
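# Illustrative round trip: roman_to_int("XIV") == 14 and int_to_roman(14) == "XIV".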
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168
|
"""simple docstring"""
from __future__ import annotations
class Matrix:
    '''simple docstring'''
    def __init__( self , rows ):
        """simple docstring"""
        error = TypeError(
            """Matrices must be formed from a list of zero or more lists containing at """
            """least one and the same number of values, each of which must be of type """
            """int or float.""" )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ):
        """simple docstring"""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def num_rows( self ):
        """simple docstring"""
        return len(self.rows )
    @property
    def num_columns( self ):
        """simple docstring"""
        return len(self.rows[0] )
    @property
    def order( self ):
        """simple docstring"""
        return (self.num_rows, self.num_columns)
    @property
    def is_square( self ):
        """simple docstring"""
        return self.order[0] == self.order[1]
    def identity( self ):
        """simple docstring"""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ):
        """simple docstring"""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def is_invertible( self ):
        """simple docstring"""
        return bool(self.determinant() )
    def get_minor( self , row , column ):
        """simple docstring"""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def cofactor( self , row , column ):
        """simple docstring"""
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ):
        """simple docstring"""
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
        """simple docstring"""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def adjugate( self ):
        """simple docstring"""
        # adjugate = transpose of the cofactor matrix
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        """simple docstring"""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
        return self.adjugate() * (1 / determinant)
    def __repr__( self ):
        """simple docstring"""
        return str(self.rows )
    def __str__( self ):
        """simple docstring"""
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    """[""" + """. """.join([str(value ) for value in row] ) + """.]"""
                    for row in self.rows
                ] )
            + "]"
        )
    def add_row( self , row , position = None ):
        """simple docstring"""
        type_error = TypeError("""Row must be a list containing all ints and/or floats""" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                """Row must be equal in length to the other rows in the matrix""" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column , position = None ):
        """simple docstring"""
        type_error = TypeError(
            """Column must be a list containing all ints and/or floats""" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                """Column must be equal in length to the other columns in the matrix""" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other ):
        """simple docstring"""
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other ):
        """simple docstring"""
        return not self == other
    def __neg__( self ):
        """simple docstring"""
        return self * -1
    def __add__( self , other ):
        """simple docstring"""
        if self.order != other.order:
            raise ValueError("""Addition requires matrices of the same order""" )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __sub__( self , other ):
        """simple docstring"""
        if self.order != other.order:
            raise ValueError("""Subtraction requires matrices of the same order""" )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __mul__( self , other ):
        """simple docstring"""
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    """The number of columns in the first matrix must """
                    """be equal to the number of rows in the second""" )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                """A Matrix can only be multiplied by an int, float, or another matrix""" )
    def __pow__( self , other ):
        """simple docstring"""
        if not isinstance(other , int ):
            raise TypeError("""A Matrix can only be raised to the power of an int""" )
        if not self.is_square:
            raise ValueError("""Only square matrices can be raised to a power""" )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertible():
                return self.inverse() ** (-other)
            raise ValueError(
                """Only invertible matrices can be raised to a negative power""" )
        # naive O(n) repeated multiplication; squaring would be O(log n)
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def dot_product( cls , row , column ):
        """simple docstring"""
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
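# Minimal usage sketch (illustrative): for m = Matrix([[1, 2], [3, 4]]),
# m.determinant() == -2 and m * m.inverse() == m.identity().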
| 168
| 1
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func , ya , xa , step_size , x_end ) -> np.array:
    # modified-Euler (Heun) integration of y' = ode_func(x, y), y(xa) = ya
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor: one plain Euler step
        y_get = y[k] + step_size * ode_func(x , y[k] )
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_get ))
        )
        x += step_size
    return y
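# Illustrative check: for y' = y with y(0) = 1 and step 0.1 on [0, 1],
# euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1] is close to e ~ 2.718.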
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
|
"""simple docstring"""
from __future__ import annotations
def kmp( pattern , text ) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern ) -> list[int]:
    # failure[k] = length of the longest proper prefix of pattern[: k + 1]
    # that is also a suffix of it
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
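# kmp runs in O(len(text) + len(pattern)): i never moves backwards and j only
# falls back through the precomputed failure table.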
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347
| 1
|
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height )-> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    # the maximizer takes the larger child value, the minimizer the smaller one;
    # is_max flips at every level of the game tree
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main( )-> None:
    """simple docstring"""
    scores = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
    height = math.log(len(scores ) , 2 )
    print(f"Optimal value : {minimax(0 , 0 , True , scores , height )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
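# With the scores above the maximizer can guarantee 65: the depth-2 max layer
# gives (90, 33, 65, 34423), the min layer (33, 65), and the root max picks 65.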
| 60
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , )-> Union[str, Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_attention_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_choices
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_attention_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self )-> Optional[int]:
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase_ ( self )-> str:
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained("albert-base-v2" )
UpperCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class FlaxAlbertModelIntegrationTest ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = FlaxAlbertModel.from_pretrained("albert-base-v2" )
UpperCamelCase_ = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
UpperCamelCase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )[0]
UpperCamelCase_ = (1, 11, 768)
self.assertEqual(output.shape , _lowercase )
UpperCamelCase_ = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1e-4 ) )
| 60
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowercase : str = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 99
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ) -> Optional[Any]:
    return getitem, k
def _set( k , v ) -> List[str]:
    return setitem, k, v
def _del( k ) -> int:
    return delitem, k
def _run_operation( obj , fun , *args ) -> Optional[Any]:
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict( operations ) -> Optional[int]:
    my = HashMap(initial_block_size=4 )
    py = {}
    # run every operation against both our HashMap and a plain dict,
    # then compare all observable state
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods( ) -> List[Any]:
    def is_public( name ) -> bool:
        return not name.startswith("""_""" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
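# dict exposes strictly more public attributes than HashMap, hence the strict
# superset check (>) rather than equality.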
| 265
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: List[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
UpperCamelCase_: Any = parser.parse_args()
return args.f
class _UpperCamelCase ( TestCasePlus ):
'''simple docstring'''
    def setup( self : Dict ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self : str , args : Dict ):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""" )
            with patch.object(sys , """argv""" , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.666 )
@slow
@require_torch_non_multi_gpu
def lowerCAmelCase__ ( self : Optional[int] ):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args )
        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args )
        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args )
| 223
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class _UpperCamelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """luke"""
    def __init__( self , vocab_size=5_0267 , entity_vocab_size=50_0000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 223
| 1
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
if args.model_type == "bert":
SCREAMING_SNAKE_CASE : Any = BertForMaskedLM.from_pretrained(args.model_name)
SCREAMING_SNAKE_CASE : Union[str, Any] = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
SCREAMING_SNAKE_CASE : Any = model.state_dict()
SCREAMING_SNAKE_CASE : Tuple = {}
for w in ["word_embeddings", "position_embeddings"]:
SCREAMING_SNAKE_CASE : Dict = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : List[str] = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
SCREAMING_SNAKE_CASE : Dict = 0
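# Copy every other teacher layer (0, 2, 4, 7, 9, 11) into consecutive student
# layers; std_idx tracks the index of the next student layer being filled.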
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : Tuple = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
SCREAMING_SNAKE_CASE : Dict = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
SCREAMING_SNAKE_CASE : Tuple = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
SCREAMING_SNAKE_CASE : Dict = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
SCREAMING_SNAKE_CASE : List[str] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
SCREAMING_SNAKE_CASE : Any = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
SCREAMING_SNAKE_CASE : int = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
SCREAMING_SNAKE_CASE : str = state_dict["cls.predictions.decoder.weight"]
SCREAMING_SNAKE_CASE : Optional[int] = state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
SCREAMING_SNAKE_CASE : List[str] = state_dict[F"cls.predictions.transform.dense.{w}"]
SCREAMING_SNAKE_CASE : Optional[int] = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 21
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
| 18
| 0
|
from string import ascii_lowercase, ascii_uppercase
def capitalize( sentence ):
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase ) )
    return lower_to_upper.get(sentence[0], sentence[0] ) + sentence[1:]
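# e.g. capitalize("hello world") returns "Hello world"; a sentence that does not
# start with a lowercase ascii letter is returned unchanged.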
if __name__ == "__main__":
from doctest import testmod
testmod()
| 138
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision ):
    """simple docstring"""
    if not isinstance(precision, int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
    # each term of the Chudnovsky series contributes roughly 14 digits
    iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1, iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""")
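# Sanity check (illustrative): the digits printed for n = 50 start 3.1415926535...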
| 138
| 1
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowerCamelCase = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
    def run_pipeline_test( self , object_detector , examples ):
        '''simple docstring'''
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowerCamelCase = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
__lowerCamelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline('''zero-shot-object-detection''' )
__lowerCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
__lowerCamelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 0.2
__lowerCamelCase = pipeline('''zero-shot-object-detection''' )
__lowerCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 2
__lowerCamelCase = pipeline('''zero-shot-object-detection''' )
__lowerCamelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__lowercase , )
self.assertEqual(
nested_simplify(__lowercase , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
| 330
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand( parser ):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ):
        hub_version = huggingface_hub.__version__
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d ):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 262
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
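# Note on the pattern above: at import time only ``_import_structure`` (plain
# strings) is built, and ``sys.modules[__name__]`` is swapped for a
# ``_LazyModule`` whose ``__getattr__`` imports a submodule the first time one
# of its names is accessed. Illustrative usage (not part of this file):
#
#   from transformers.models.electra import ElectraModel
#   # the heavy ``modeling_electra`` module is only imported at this point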
| 16
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ) -> Any:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
__UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_small_model_pt( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
__UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
__UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
    def test_fp16_casting( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
__UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
        self.assertIsInstance(__UpperCAmelCase , list )
@slow
@require_torch
    def test_large_model_pt( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__UpperCAmelCase )
@slow
@require_tf
    def test_large_model_tf( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__UpperCAmelCase )
    def run_large_test( self , unmasker ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
__UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_model_no_pad_pt( self ) -> List[str]:
        '''simple docstring'''
        unmasker = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf( self ) -> Optional[int]:
        '''simple docstring'''
        unmasker = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline( self , model , tokenizer , processor ) -> Any:
        '''simple docstring'''
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token (probably reformer or wav2vec2)""" )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ) -> List[Any]:
        '''simple docstring'''
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f'This is a {tokenizer.mask_token}' , )
        self.assertEqual(
            outputs , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
        outputs = fill_masker([f'This is a {tokenizer.mask_token}'] )
        self.assertEqual(
            outputs , [
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
] , )
        outputs = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] )
        self.assertEqual(
            outputs , [
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
[
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
{"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )},
],
] , )
        with self.assertRaises(ValueError ):
            fill_masker([None] )
        # Calling the pipeline on text that contains no mask_token is not supported
        with self.assertRaises(PipelineException ):
            fill_masker("""This is""" )
        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets( self , model , tokenizer ) -> Any:
        '''simple docstring'''
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(processed_targets ) )
        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=targets )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(processed_targets ) )
        # Score equivalence
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=targets )
        tokens = [top_mask["""token_str"""] for top_mask in outputs]
        scores = [top_mask["""score"""] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens ) == set(targets ):
            unmasked_targets = fill_masker(f'This is a {tokenizer.mask_token}' , targets=tokens )
            target_scores = [top_mask["""score"""] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
        # Raises with invalid
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] )
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" )
    def run_test_top_k( self , model , tokenizer ) -> Tuple:
        '''simple docstring'''
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs_top_k = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 )
        self.assertEqual(
            outputs_top_k , [
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_top_k ) )
    def run_test_top_k_targets( self , model , tokenizer ) -> Dict:
        '''simple docstring'''
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=targets )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets_order = [el["""token_str"""] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets_order ).issubset(targets ):
            outputs_and_targets = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=targets_order )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs_and_targets ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ) -> Optional[int]:
        '''simple docstring'''
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f'My name is {tokenizer.mask_token}' , targets=targets , top_k=10 )
        # The target list contains duplicates, so the pipeline can't return more
        # results than the number of unique targets
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ) -> Optional[int]:
        '''simple docstring'''
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                    {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                ],
                [
                    {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                    {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                ],
                [
                    {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                    {"""sequence""": ANY(str ), """score""": ANY(float ), """token""": ANY(int ), """token_str""": ANY(str )},
                ],
            ] , )
| 16
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCamelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester( unittest.TestCase , ToolTesterMixin ):
    def setUp(self ) -> None:
        '''simple docstring'''
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )
    def test_exact_match_arg(self ) -> None:
        '''simple docstring'''
        result = self.tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_arg_remote(self ) -> None:
        '''simple docstring'''
        result = self.remote_tool(TEXT , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_kwarg(self ) -> None:
        '''simple docstring'''
        result = self.tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
    def test_exact_match_kwarg_remote(self ) -> None:
        '''simple docstring'''
        result = self.remote_tool(text=TEXT , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
| 59
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211
| 0
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
snake_case__ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    """simple docstring"""
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''}, )
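# Minimal usage sketch (``HfArgumentParser`` is standard transformers API; the
# flag shown is just an example):
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(Seq2SeqTrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses()
#   print(training_args.lr_scheduler)  # 'linear' unless --lr_scheduler is passed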
| 362
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
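# Worked example of the recurrence above on [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# with allow_empty_subarrays=False, tracking (curr_sum, max_sum) per element:
#   -2 -> (-2, -2);  1 -> (1, 1);  -3 -> (-2, 1);  4 -> (4, 4);  -1 -> (3, 4);
#    2 -> (5, 5);    1 -> (6, 6);  -5 -> (1, 6);    4 -> (5, 6)
# i.e. the maximum subarray is [4, -1, 2, 1] with sum 6.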
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 4
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__A = "main"
# Default branch name
__A = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__A = "aaaaaaa"
# This commit does not exist, so we should 404.
__A = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__A = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
print("Bonjour!" )
yield
print("Au revoir!" )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers") is not None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO)
    def test_context_managers_no_context(self , mock_stdout ) ->Dict:
'''simple docstring'''
with ContextManagers([]):
print("Transformers are awesome!")
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n")
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO)
    def test_context_managers_one_context(self , mock_stdout ) ->Any:
'''simple docstring'''
with ContextManagers([context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n")
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO)
    def test_context_managers_two_contexts(self , mock_stdout ) ->List[Any]:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
@require_torch
    def test_find_labels_pt(self ) ->Union[str, Any]:
        '''simple docstring'''
        self.assertEqual(find_labels(BertForSequenceClassification) , ["labels"])
        self.assertEqual(find_labels(BertForPreTraining) , ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering) , ["start_positions", "end_positions"])
        class DummyModel(BertForSequenceClassification ):
            '''simple docstring'''
            pass
        self.assertEqual(find_labels(DummyModel) , ["labels"])
@require_tf
    def test_find_labels_tf(self ) ->Optional[Any]:
        '''simple docstring'''
        self.assertEqual(find_labels(TFBertForSequenceClassification) , ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining) , ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering) , ["start_positions", "end_positions"])
        class DummyModel(TFBertForSequenceClassification ):
            '''simple docstring'''
            pass
        self.assertEqual(find_labels(DummyModel) , ["labels"])
@require_flax
    def test_find_labels_flax(self ) ->Any:
        '''simple docstring'''
        self.assertEqual(find_labels(FlaxBertForSequenceClassification) , [])
        self.assertEqual(find_labels(FlaxBertForPreTraining) , [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering) , [])
        class DummyModel(FlaxBertForSequenceClassification ):
            '''simple docstring'''
            pass
        self.assertEqual(find_labels(DummyModel) , [])
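    # ``find_labels`` works by inspecting the signature of the model class's
    # forward/call method and collecting argument names that look like labels;
    # Flax heads take no label arguments in ``__call__``, hence the empty lists.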
| 10
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card (model_card_dir , src_lang , tgt_lang ):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = F'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(F'Generating {path}' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(SCREAMING_SNAKE_CASE )  # the model-card text built above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 113
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        A__ : Optional[int] =UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowercase__ , )
A__ : Optional[int] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , )
torch.manual_seed(0 )
A__ : str =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A__ : List[str] =ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
A__ : Tuple =ClapTextModelWithProjection(lowercase__ )
A__ : List[str] =RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
        A__ : Optional[Any] =SpeechT5HifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowercase__ , )
        A__ : List[Any] =SpeechT5HifiGan(lowercase__ )
A__ : Tuple ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=0 ) -> Dict:
'''simple docstring'''
if str(lowercase__ ).startswith("""mps""" ):
A__ : str =torch.manual_seed(lowercase__ )
else:
A__ : Optional[int] =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
A__ : Tuple ={
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
A__ : Any ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : List[str] =self.get_dummy_components()
A__ : Any =AudioLDMPipeline(**lowercase__ )
A__ : Optional[Any] =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : Optional[int] =self.get_dummy_inputs(lowercase__ )
A__ : List[str] =audioldm_pipe(**lowercase__ )
A__ : Dict =output.audios[0]
assert audio.ndim == 1
assert len(lowercase__ ) == 2_56
A__ : Union[str, Any] =audio[:10]
A__ : Any =np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Tuple =self.get_dummy_components()
A__ : Any =AudioLDMPipeline(**lowercase__ )
A__ : Tuple =audioldm_pipe.to(lowercase__ )
A__ : Union[str, Any] =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : Optional[Any] =self.get_dummy_inputs(lowercase__ )
A__ : Optional[int] =3 * [inputs["""prompt"""]]
# forward
A__ : int =audioldm_pipe(**lowercase__ )
A__ : Any =output.audios[0]
A__ : Union[str, Any] =self.get_dummy_inputs(lowercase__ )
A__ : int =3 * [inputs.pop("""prompt""" )]
A__ : Any =audioldm_pipe.tokenizer(
lowercase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""pt""" , )
A__ : str =text_inputs["""input_ids"""].to(lowercase__ )
A__ : Union[str, Any] =audioldm_pipe.text_encoder(
lowercase__ , )
A__ : Union[str, Any] =prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A__ : Any =F.normalize(lowercase__ , dim=-1 )
A__ : List[Any] =prompt_embeds
# forward
A__ : Union[str, Any] =audioldm_pipe(**lowercase__ )
A__ : Dict =output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : int =self.get_dummy_components()
A__ : Union[str, Any] =AudioLDMPipeline(**lowercase__ )
A__ : Tuple =audioldm_pipe.to(lowercase__ )
A__ : int =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : Any =self.get_dummy_inputs(lowercase__ )
A__ : Any =3 * ["""this is a negative prompt"""]
A__ : Dict =negative_prompt
A__ : int =3 * [inputs["""prompt"""]]
# forward
A__ : List[str] =audioldm_pipe(**lowercase__ )
A__ : str =output.audios[0]
A__ : Optional[Any] =self.get_dummy_inputs(lowercase__ )
A__ : Tuple =3 * [inputs.pop("""prompt""" )]
A__ : Optional[Any] =[]
for p in [prompt, negative_prompt]:
A__ : List[Any] =audioldm_pipe.tokenizer(
lowercase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""pt""" , )
A__ : List[str] =text_inputs["""input_ids"""].to(lowercase__ )
A__ : Union[str, Any] =audioldm_pipe.text_encoder(
lowercase__ , )
A__ : Tuple =text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A__ : Optional[Any] =F.normalize(lowercase__ , dim=-1 )
embeds.append(lowercase__ )
A__ , A__ : Dict =embeds
# forward
A__ : Optional[Any] =audioldm_pipe(**lowercase__ )
A__ : Optional[Any] =output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : List[str] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : List[Any] =self.get_dummy_components()
A__ : Dict =PNDMScheduler(skip_prk_steps=lowercase__ )
A__ : int =AudioLDMPipeline(**lowercase__ )
A__ : List[str] =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : int =self.get_dummy_inputs(lowercase__ )
A__ : Optional[int] ="""egg cracking"""
A__ : Dict =audioldm_pipe(**lowercase__ , negative_prompt=lowercase__ )
A__ : Optional[int] =output.audios[0]
assert audio.ndim == 1
assert len(lowercase__ ) == 2_56
A__ : Any =audio[:10]
A__ : str =np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Optional[Any] =self.get_dummy_components()
A__ : Any =PNDMScheduler(skip_prk_steps=lowercase__ )
A__ : Dict =AudioLDMPipeline(**lowercase__ )
A__ : Any =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : str ="""A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
A__ : Tuple =audioldm_pipe(lowercase__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
A__ : int =2
A__ : List[Any] =audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
A__ : List[str] =2
A__ : Optional[int] =audioldm_pipe(lowercase__ , num_inference_steps=2 , num_waveforms_per_prompt=lowercase__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
A__ : Tuple =2
A__ : Optional[int] =audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowercase__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
A__ : Union[str, Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : List[Any] =self.get_dummy_components()
A__ : Any =AudioLDMPipeline(**lowercase__ )
A__ : Optional[int] =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : str =audioldm_pipe.vocoder.config.sampling_rate
A__ : Any =self.get_dummy_inputs(lowercase__ )
A__ : Dict =audioldm_pipe(audio_length_in_s=0.016 , **lowercase__ )
A__ : List[Any] =output.audios[0]
assert audio.ndim == 1
assert len(lowercase__ ) / vocoder_sampling_rate == 0.016
A__ : Union[str, Any] =audioldm_pipe(audio_length_in_s=0.032 , **lowercase__ )
A__ : Any =output.audios[0]
assert audio.ndim == 1
assert len(lowercase__ ) / vocoder_sampling_rate == 0.032
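        # Note: with the dummy vocoder's sampling rate of 16_000 Hz, requesting
        # audio_length_in_s=0.016 yields 0.016 * 16_000 = 256 samples, matching
        # the default waveform length asserted elsewhere in this test class.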
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
A__ : int =self.get_dummy_components()
A__ : int =AudioLDMPipeline(**lowercase__ )
A__ : int =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : Optional[Any] =["""hey"""]
A__ : Any =audioldm_pipe(lowercase__ , num_inference_steps=1 )
A__ : int =output.audios.shape
assert audio_shape == (1, 2_56)
A__ : List[str] =audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        A__ : Any =SpeechT5HifiGan(lowercase__ ).to(lowercase__ )
A__ : Optional[int] =audioldm_pipe(lowercase__ , num_inference_steps=1 )
A__ : Dict =output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowercase__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ )
@slow
class AudioLDMPipelineSlowTests( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict="cpu" , lowerCAmelCase_ : List[Any]=torch.floataa , lowerCAmelCase_ : Tuple=0 ) -> List[str]:
'''simple docstring'''
A__ : Tuple =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
A__ : List[str] =np.random.RandomState(lowercase__ ).standard_normal((1, 8, 1_28, 16) )
A__ : Any =torch.from_numpy(lowercase__ ).to(device=lowercase__ , dtype=lowercase__ )
A__ : int ={
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
A__ : Optional[int] =AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
A__ : Tuple =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : Dict =self.get_inputs(lowercase__ )
A__ : Any =25
A__ : Tuple =audioldm_pipe(**lowercase__ ).audios[0]
assert audio.ndim == 1
assert len(lowercase__ ) == 8_19_20
A__ : Any =audio[7_72_30:7_72_40]
A__ : List[Any] =np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
A__ : Union[str, Any] =np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
A__ : Tuple =AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
A__ : List[str] =LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
A__ : Dict =audioldm_pipe.to(lowercase__ )
audioldm_pipe.set_progress_bar_config(disable=lowercase__ )
A__ : Tuple =self.get_inputs(lowercase__ )
A__ : Dict =audioldm_pipe(**lowercase__ ).audios[0]
assert audio.ndim == 1
assert len(lowercase__ ) == 8_19_20
A__ : Optional[int] =audio[2_77_80:2_77_90]
A__ : Tuple =np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
A__ : Tuple =np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 358
|
'''simple docstring'''
def lucas_lehmer_test( p : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
return s == 0
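# Worked example for p = 5 (M_5 = 2**5 - 1 = 31): s starts at 4, then
#   s = (4 * 4 - 2)   % 31 = 14
#   s = (14 * 14 - 2) % 31 = 194 % 31 = 8
#   s = (8 * 8 - 2)   % 31 = 62 % 31 = 0
# after p - 2 = 3 squarings s == 0, so M_5 = 31 is prime.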
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 136
| 0
|
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits : int = 3 ):
    if isinstance(number_of_qubits , str ):
        raise TypeError('number of qubits must be an integer.' )
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.' )
    if number_of_qubits > 1_0:
        raise ValueError('number of qubits too large to simulate(>10).' )
    qr = QuantumRegister(number_of_qubits , 'qr' )
    cr = ClassicalRegister(number_of_qubits , 'cr' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        # Hadamard on the current most-significant qubit ...
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            # ... followed by controlled phase rotations from the lower qubits
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator' )
    job = execute(quantum_circuit , backend , shots=1_0_0_0_0 )
    return job.result().get_counts(quantum_circuit )
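# Sanity check on the result: the circuit starts from |000>, and the QFT of
# |000> is the uniform superposition, so with 10_000 shots the returned counts
# should be roughly 1_250 per basis state, e.g. (illustrative numbers only):
#   {'000': 1262, '001': 1237, ..., '111': 1244}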
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 83
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput( BaseOutput ):
    sample: torch.FloatTensor
class Encoder( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("DownEncoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_=True , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
        _snake_case = torch.nn.Conv2d(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
# down
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
        _snake_case = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
_snake_case = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = 2 * out_channels if double_z else out_channels
        _snake_case = nn.Conv2d(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = x
_snake_case = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
_snake_case = down_block(lowerCAmelCase_ )
# middle
_snake_case = self.mid_block(lowerCAmelCase_ )
# post-process
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
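
# Hedged shape sketch (not in the original module): with double_z=True a
# single-stage Encoder emits 2 * out_channels "moment" channels (mean and
# logvar stacked on dim 1), and the Decoder maps latents back to image space.
# Channel counts below are illustrative, not the defaults of any released
# checkpoint.
def _encoder_decoder_shapes():  # hypothetical helper, for illustration only
    enc = Encoder(in_channels=3, out_channels=4, block_out_channels=(32,), double_z=True)
    dec = Decoder(in_channels=4, out_channels=3, block_out_channels=(32,))
    moments = enc(torch.randn(1, 3, 32, 32))
    assert moments.shape == (1, 8, 32, 32)
    recon = dec(moments[:, :4])  # e.g. decode only the mean channels
    assert recon.shape == (1, 3, 32, 32)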
class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
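
# Hedged usage sketch (not in the original module): quantizing a random feature
# map; `n_e` and `vq_embed_dim` are illustrative values.
def _vector_quantizer_example():  # hypothetical helper, for illustration only
    vq = VectorQuantizer(n_e=16, vq_embed_dim=8, beta=0.25, sane_index_shape=True)
    z = torch.randn(2, 8, 4, 4, requires_grad=True)
    z_q, loss, (_, _, indices) = vq(z)
    assert z_q.shape == z.shape and indices.shape == (2, 4, 4)
    # the straight-through estimator lets gradients flow through z_q back to z
    z_q.sum().backward()
    assert z.grad is not None
    return loss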
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
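
# Hedged usage sketch (not in the original module): the reparameterized sample
# and the KL against a standard normal, reduced over (C, H, W) per batch item.
def _gaussian_posterior_example():  # hypothetical helper, for illustration only
    moments = torch.randn(2, 8, 4, 4)  # 4 mean channels + 4 logvar channels
    posterior = DiagonalGaussianDistribution(moments)
    latent = posterior.sample()  # shape (2, 4, 4, 4)
    kl = posterior.kl()  # shape (2,)
    assert latent.shape == (2, 4, 4, 4) and kl.shape == (2,)
    return latent, kl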
| 42
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = ConsistencyModelPipeline
lowercase__ : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase__ : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase__ : Dict = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def snake_case__ ( self ):
_lowerCamelCase = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def snake_case__ ( self ):
_lowerCamelCase = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def snake_case__ ( self , lowerCamelCase__=False ):
if class_cond:
_lowerCamelCase = self.dummy_cond_unet
else:
_lowerCamelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
_lowerCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [2_2, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
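
# Hedged usage sketch (not part of the test module): the minimal invocation
# pattern the fast tests above exercise, outside the unittest harness.
def _run_tiny_consistency_model():  # hypothetical helper, for illustration only
    unet = UNet2DModel.from_pretrained("diffusers/consistency-models-test", subfolder="test_unet")
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
    pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cpu")
    image = pipe(
        batch_size=1,
        num_inference_steps=None,
        timesteps=[22, 0],
        generator=torch.manual_seed(0),
        output_type="np",
    ).images
    return image  # expected shape (1, 32, 32, 3)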
| 73
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Dict = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
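
# Hedged illustration (not part of the original file): with the _LazyModule
# registration above, submodules are imported only on first attribute access,
# so importing the package stays cheap and torch is only required once a model
# class is actually touched, e.g.:
#
#     from transformers.models.mega import MegaConfig  # does not import torch
#     config = MegaConfig()  # triggers only configuration_mega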
| 73
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->List[Any]:
'''simple docstring'''
a : str = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
a : str = [144, 192, 240]
a : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
a : List[str] = [96, 120, 144]
a : Dict = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
a : List[Any] = [64, 80, 96]
a : Any = [16, 16, 24, 48, 64, 80, 320]
a : List[str] = 0.05
a : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
a : Optional[Any] = 512
a : List[Any] = 16
a : List[str] = 21
a : int = """pascal-voc-id2label.json"""
else:
a : Optional[Any] = 1000
a : Optional[Any] = """imagenet-1k-id2label.json"""
a : Tuple = """huggingface/label-files"""
a : str = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
a : Optional[int] = {int(_A ): v for k, v in idalabel.items()}
a : Any = idalabel
a : Dict = {v: k for k, v in idalabel.items()}
return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
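
# Hedged illustration (not part of the original script): two concrete renames
# traced by hand through the rules above with base_model=False. These examples
# are worked out for illustration and are not taken from a real checkpoint.
#
#     "conv_1.block.norm.weight"
#         -> "mobilevit.conv_stem.normalization.weight"
#     "layer_1.0.exp_1x1.block.conv.weight"
#         -> "mobilevit.encoder.layer.0.layer.0.expand_1x1.convolution.weight"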
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a : int = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
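
# Hedged usage sketch (not part of the original script): a typical invocation;
# the script filename and paths below are placeholders.
#
#     python convert_mobilevit_checkpoint.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small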
| 105
|
import collections
import inspect
import unittest

from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
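
# Hedged usage sketch (not part of the test module): the backbone behavior the
# tests above verify, in isolation. Config values are illustrative only.
def _focalnet_backbone_example():  # hypothetical helper, for illustration only
    config = FocalNetConfig(
        image_size=32,
        patch_size=2,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        out_features=["stage1", "stage2"],
    )
    model = FocalNetBackbone(config)
    model.eval()
    with torch.no_grad():
        outputs = model(torch.randn(1, 3, 32, 32))
    assert len(outputs.feature_maps) == 2  # one feature map per requested stage
    return outputs.feature_maps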
| 342
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = """"""
snake_case_ = """"""
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_graph_replace_input_with(node_proto.attribute[1].g , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
for n in graph_proto.node:
_node_replace_input_with(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = os.path.dirname(_SCREAMING_SNAKE_CASE )
snake_case_ = os.path.basename(_SCREAMING_SNAKE_CASE )
snake_case_ = onnx.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_SCREAMING_SNAKE_CASE )
dup_set.add(_SCREAMING_SNAKE_CASE )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , _SCREAMING_SNAKE_CASE )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1_024 / 1_024 / 1_024 , """GB""" )
snake_case_ = sorted(_SCREAMING_SNAKE_CASE )
_remove_dup_initializers_from_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = """optimized_""" + model_file_name
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
onnx.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return new_model
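
# Hedged usage sketch (not part of the original script); the path below is a
# placeholder.
#
#     optimized_path = remove_dup_initializers("./model/encoder.onnx")
#     print(optimized_path)  # ./model/optimized_encoder.onnx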
| 360
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 233
| 0
|
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class a_ ( _lowerCAmelCase ):
def __init__( self : Optional[Any] , lowercase : int , lowercase : float = -float("Inf" ) , lowercase : int = 1 ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ) or top_k <= 0:
raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' )
lowercase_ :List[str] = max(lowercase , lowercase )
lowercase_ :Optional[int] = filter_value
def __call__( self : int , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ , lowercase_ :int = scores.shape
lowercase_ :str = jnp.full(batch_size * vocab_size , self.filter_value )
lowercase_ :Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
lowercase_ , lowercase_ :Any = lax.top_k(lowercase , lowercase )
lowercase_ :List[Any] = jnp.broadcast_to((jnp.arange(lowercase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowercase_ :Tuple = topk_scores.flatten()
lowercase_ :Any = topk_indices.flatten() + shift
lowercase_ :Union[str, Any] = next_scores_flat.at[topk_indices_flat].set(lowercase )
lowercase_ :Optional[int] = next_scores_flat.reshape(lowercase , lowercase )
return next_scores
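# Self-contained sketch of top-k filtering on a single score row: keep the k
# largest scores and push everything else down to the filter value.
import jax.numpy as jnp
from jax import lax

def _demo_top_k(scores, k=5, filter_value=-jnp.inf):
    topk_scores, topk_indices = lax.top_k(scores, min(k, scores.shape[-1]))
    filtered = jnp.full_like(scores, filter_value)
    return filtered.at[topk_indices].set(topk_scores)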
class a_ ( _lowerCAmelCase ):
def __init__( self : Union[str, Any] , lowercase : int ):
"""simple docstring"""
lowercase_ :List[str] = bos_token_id
def __call__( self : List[str] , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = jnp.full(scores.shape , -float("inf" ) )
lowercase_ :str = 1 - jnp.bool_(cur_len - 1 )
lowercase_ :List[str] = jnp.where(lowercase , new_scores.at[:, self.bos_token_id].set(0 ) , lowercase )
return scores
class a_ ( _lowerCAmelCase ):
def __init__( self : Optional[Any] , lowercase : int , lowercase : int ):
"""simple docstring"""
lowercase_ :Tuple = max_length
lowercase_ :Optional[int] = eos_token_id
def __call__( self : Tuple , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :Any = jnp.full(scores.shape , -float("inf" ) )
lowercase_ :Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowercase_ :List[Any] = jnp.where(lowercase , new_scores.at[:, self.eos_token_id].set(0 ) , lowercase )
return scores
class a_ ( _lowerCAmelCase ):
def __init__( self : Any , lowercase : int , lowercase : int ):
"""simple docstring"""
if not isinstance(lowercase , lowercase ) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(lowercase , lowercase ) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
lowercase_ :int = min_length
lowercase_ :str = eos_token_id
def __call__( self : str , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :List[str] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowercase_ :Union[str, Any] = jnp.where(lowercase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , lowercase )
return scores
class a_ ( _lowerCAmelCase ):
def __init__( self : Dict , lowercase : Any , lowercase : List[str] ):
"""simple docstring"""
lowercase_ :List[str] = list(lowercase )
lowercase_ :Optional[int] = begin_index
def __call__( self : Tuple , lowercase : str , lowercase : Dict , lowercase : int ):
"""simple docstring"""
lowercase_ :Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
lowercase_ :List[Any] = jnp.where(lowercase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , lowercase )
return scores
class a_ ( _lowerCAmelCase ):
def __init__( self : List[Any] , lowercase : list ):
"""simple docstring"""
lowercase_ :Dict = list(lowercase )
def __call__( self : Any , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :str = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class a_ ( _lowerCAmelCase ):
def __init__( self : List[str] , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = dict(lowercase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        lowercase_ :Union[str, Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
lowercase_ :List[str] = force_token_array.at[index].set(lowercase )
        lowercase_ :Union[str, Any] = jnp.int32(lowercase )
def __call__( self : List[Any] , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
def _force_token(lowercase : Optional[int] ):
lowercase_ :List[Any] = scores.shape[0]
lowercase_ :Optional[Any] = self.force_token_array[generation_idx]
lowercase_ :List[str] = jnp.ones_like(lowercase , dtype=scores.dtype ) * -float("inf" )
lowercase_ :Optional[int] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowercase_ :Optional[Any] = lax.dynamic_update_slice(lowercase , lowercase , (0, current_token) )
return new_scores
lowercase_ :List[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowercase ) , lambda: scores , ) , )
return scores
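# Example of the force_token_map format consumed above: the key is the
# generation step, the value is the token id that must be emitted at that
# step. The token ids below are made up for illustration.
import jax.numpy as jnp

_demo_force_token_map = {0: 50258, 1: 50259}
_demo_force_token_array = jnp.ones(max(_demo_force_token_map.keys()) + 1, dtype=jnp.int32) * -1
for _idx, _tok in _demo_force_token_map.items():
    _demo_force_token_array = _demo_force_token_array.at[_idx].set(_tok)
# steps with no forced token keep the sentinel value -1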
class a_ ( _lowerCAmelCase ):
def __init__( self : Dict , lowercase : str , lowercase : Any , lowercase : Any ):
"""simple docstring"""
lowercase_ :str = generate_config.eos_token_id
lowercase_ :Dict = generate_config.no_timestamps_token_id
lowercase_ :int = generate_config.no_timestamps_token_id + 1
lowercase_ :int = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowercase , "max_initial_timestamp_index" ):
lowercase_ :Union[str, Any] = generate_config.max_initial_timestamp_index
else:
lowercase_ :str = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowercase_ :Union[str, Any] = model_config.vocab_size
def __call__( self : Dict , lowercase : Tuple , lowercase : Tuple , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowercase : str , lowercase : int ):
lowercase_ :List[str] = jnp.where((cur_len - self.begin_index) >= 1 , lowercase , lowercase )
lowercase_ :List[Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowercase , )
lowercase_ :Optional[Any] = jnp.where((cur_len - self.begin_index) < 2 , lowercase , lowercase )
lowercase_ :Optional[int] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowercase , lowercase , )
return jnp.where(
lowercase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , lowercase , )
lowercase_ :Optional[Any] = jax.vmap(lowercase )(lowercase , lowercase )
lowercase_ :Any = jnp.where(cur_len == self.begin_index , lowercase , lowercase )
lowercase_ :int = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowercase , )
lowercase_ :List[str] = self.timestamp_begin + self.max_initial_timestamp_index
lowercase_ :List[str] = jnp.where(
lowercase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , lowercase , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowercase_ :int = jax.nn.log_softmax(lowercase , axis=-1 )
def handle_cumulative_probs(lowercase : Dict , lowercase : Union[str, Any] ):
lowercase_ :Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowercase_ :Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , lowercase , )
lowercase_ :Dict = jax.vmap(lowercase )(lowercase , lowercase )
return scores
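# Toy restatement of the final rule above: once the total probability mass on
# timestamp tokens exceeds the best text token, only timestamps may be
# sampled. The split index below is illustrative.
import jax
import jax.numpy as jnp

_demo_logprobs = jax.nn.log_softmax(jnp.array([1.0, 0.5, 2.0, 1.5]))
_demo_timestamp_begin = 2  # pretend ids >= 2 are timestamp tokens
_demo_timestamp_mass = jax.nn.logsumexp(_demo_logprobs[_demo_timestamp_begin:])
_demo_max_text = jnp.max(_demo_logprobs[:_demo_timestamp_begin])
# if _demo_timestamp_mass > _demo_max_text, text tokens get masked to -inf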
| 223
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[str] ='''▁'''
lowerCAmelCase : List[str] ={
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase : Optional[Any] ={
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
lowerCAmelCase : int ={
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
lowerCAmelCase : str ={
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class a_ ( _lowerCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = ["input_ids", "attention_mask"]
__A = []
__A = []
def __init__( self : Any , lowercase : Any , lowercase : List[Any] , lowercase : int=None , lowercase : Optional[Any]=None , lowercase : Union[str, Any]="<s>" , lowercase : Any="</s>" , lowercase : Optional[int]="</s>" , lowercase : List[Any]="<pad>" , lowercase : Optional[int]="<unk>" , lowercase : Optional[int]="m2m100" , lowercase : Optional[Dict[str, Any]] = None , lowercase : Any=8 , **lowercase : int , ):
"""simple docstring"""
lowercase_ :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ :Optional[Any] = language_codes
lowercase_ :Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowercase_ :List[Any] = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
lowercase_ :Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase , tgt_lang=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , unk_token=lowercase , pad_token=lowercase , language_codes=lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase , **lowercase , )
lowercase_ :Optional[int] = vocab_file
lowercase_ :Any = load_json(lowercase )
lowercase_ :Optional[Any] = {v: k for k, v in self.encoder.items()}
lowercase_ :List[str] = spm_file
lowercase_ :List[str] = load_spm(lowercase , self.sp_model_kwargs )
lowercase_ :Optional[int] = len(self.encoder )
lowercase_ :int = {
self.get_lang_token(lowercase ): self.encoder_size + i for i, lang_code in enumerate(lowercase )
}
lowercase_ :List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase )}
lowercase_ :List[Any] = {v: k for k, v in self.lang_token_to_id.items()}
lowercase_ :int = src_lang if src_lang is not None else "en"
lowercase_ :Union[str, Any] = tgt_lang
lowercase_ :List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowercase_ :int = num_madeup_words
@property
def lowercase__ ( self : List[str] ):
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowercase__ ( self : Any ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowercase__ ( self : Optional[int] , lowercase : str ):
"""simple docstring"""
lowercase_ :str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Dict , lowercase : str ):
"""simple docstring"""
return self.sp_model.encode(lowercase , out_type=lowercase )
def lowercase__ ( self : Tuple , lowercase : Dict ):
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowercase , self.encoder[self.unk_token] )
def lowercase__ ( self : Any , lowercase : int ):
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowercase , self.unk_token )
def lowercase__ ( self : int , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = []
lowercase_ :Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
lowercase_ :str = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def lowercase__ ( self : Any , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
lowercase_ :List[Any] = [1] * len(self.prefix_tokens )
lowercase_ :List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase )) + suffix_ones
return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones
def lowercase__ ( self : Union[str, Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :str = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
"""simple docstring"""
lowercase_ :Any = self.__dict__.copy()
lowercase_ :str = None
return state
def __setstate__( self : Tuple , lowercase : Dict ):
"""simple docstring"""
lowercase_ :int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase_ :List[str] = {}
lowercase_ :List[Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase__ ( self : str , lowercase : str , lowercase : Optional[str] = None ):
"""simple docstring"""
lowercase_ :Dict = Path(lowercase )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
lowercase_ :Dict = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
lowercase_ :Dict = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase )
elif not os.path.isfile(self.spm_file ):
with open(lowercase , "wb" ) as fi:
lowercase_ :List[str] = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (str(lowercase ), str(lowercase ))
def lowercase__ ( self : List[str] , lowercase : List[str] , lowercase : str = "en" , lowercase : Optional[List[str]] = None , lowercase : str = "ro" , **lowercase : Optional[int] , ):
"""simple docstring"""
lowercase_ :int = src_lang
lowercase_ :Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(lowercase , lowercase , **lowercase )
def lowercase__ ( self : List[Any] , lowercase : Any , lowercase : Optional[str] , lowercase : Optional[str] , **lowercase : Union[str, Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowercase_ :List[str] = src_lang
lowercase_ :Union[str, Any] = self(lowercase , add_special_tokens=lowercase , **lowercase )
lowercase_ :str = self.get_lang_id(lowercase )
lowercase_ :Union[str, Any] = tgt_lang_id
return inputs
def lowercase__ ( self : str ):
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : str , lowercase : str ):
"""simple docstring"""
lowercase_ :List[str] = self.get_lang_token(lowercase )
lowercase_ :List[str] = self.lang_token_to_id[lang_token]
lowercase_ :List[Any] = [self.cur_lang_id]
lowercase_ :str = [self.eos_token_id]
def lowercase__ ( self : str , lowercase : str ):
"""simple docstring"""
lowercase_ :Optional[int] = self.get_lang_token(lowercase )
lowercase_ :Tuple = self.lang_token_to_id[lang_token]
lowercase_ :Dict = [self.cur_lang_id]
lowercase_ :List[Any] = [self.eos_token_id]
def lowercase__ ( self : Union[str, Any] , lowercase : str ):
"""simple docstring"""
return self.lang_code_to_token[lang]
def lowercase__ ( self : Dict , lowercase : str ):
"""simple docstring"""
lowercase_ :Union[str, Any] = self.get_lang_token(lowercase )
return self.lang_token_to_id[lang_token]
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : Dict[str, Any] ):
    spm = sentencepiece.SentencePieceProcessor(**__lowerCamelCase )
spm.Load(str(__lowerCamelCase ) )
return spm
def UpperCAmelCase_ ( __lowerCamelCase : str ):
with open(__lowerCamelCase ,"r" ) as f:
return json.load(__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : int ,__lowerCamelCase : str ):
with open(__lowerCamelCase ,"w" ) as f:
json.dump(__lowerCamelCase ,__lowerCamelCase ,indent=2 )
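# Usage sketch for the tokenizer defined above. M2M100Tokenizer and the
# facebook/m2m100_418M checkpoint are real Hugging Face names, but running
# this downloads the vocabulary files.
from transformers import M2M100Tokenizer

_demo_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
_demo_encoded = _demo_tokenizer("Hello world", return_tensors="pt")
# input_ids start with the __en__ language token and end with </s>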
| 223
| 1
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __magic_name__ ( lowerCAmelCase__ ):
def __get__( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any]=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowercase :Dict = '''__cached_''' + self.fget.__name__
lowercase :Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if cached is None:
lowercase :Optional[Any] = self.fget(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return cached
def lowerCamelCase (a_ :List[str]) -> str:
    val = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""")
def lowerCamelCase (a_ :Tuple) -> Dict:
if is_torch_fx_proxy(_A):
return True
if is_torch_available():
import torch
if isinstance(_A , torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_A , tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_A , (jnp.ndarray, Tracer)):
return True
return isinstance(_A , np.ndarray)
def lowerCamelCase (a_ :Optional[Any]) -> int:
return isinstance(_A , np.ndarray)
def lowerCamelCase (a_ :List[str]) -> Optional[Any]:
return _is_numpy(_A)
def lowerCamelCase (a_ :Tuple) -> List[str]:
import torch
return isinstance(_A , torch.Tensor)
def lowerCamelCase (a_ :Any) -> Optional[int]:
return False if not is_torch_available() else _is_torch(_A)
def lowerCamelCase (a_ :Union[str, Any]) -> List[str]:
import torch
return isinstance(_A , torch.device)
def lowerCamelCase (a_ :Any) -> Dict:
return False if not is_torch_available() else _is_torch_device(_A)
def lowerCamelCase (a_ :Union[str, Any]) -> str:
import torch
if isinstance(_A , _A):
if hasattr(_A , _A):
lowercase :Tuple = getattr(_A , _A)
else:
return False
return isinstance(_A , torch.dtype)
def lowerCamelCase (a_ :str) -> int:
return False if not is_torch_available() else _is_torch_dtype(_A)
def lowerCamelCase (a_ :Union[str, Any]) -> str:
import tensorflow as tf
return isinstance(_A , tf.Tensor)
def lowerCamelCase (a_ :List[str]) -> Dict:
return False if not is_tf_available() else _is_tensorflow(_A)
def lowerCamelCase (a_ :Optional[int]) -> str:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_A , '''is_symbolic_tensor'''):
return tf.is_symbolic_tensor(_A)
return type(_A) == tf.Tensor
def lowerCamelCase (a_ :Optional[Any]) -> Dict:
return False if not is_tf_available() else _is_tf_symbolic_tensor(_A)
def lowerCamelCase (a_ :str) -> Any:
import jax.numpy as jnp # noqa: F811
return isinstance(_A , jnp.ndarray)
def lowerCamelCase (a_ :Optional[int]) -> List[Any]:
return False if not is_flax_available() else _is_jax(_A)
def lowerCamelCase (a_ :str) -> Any:
    if isinstance(_A , (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(_A , (list, tuple)):
        return [to_py_obj(o) for o in obj]
elif is_tf_tensor(_A):
return obj.numpy().tolist()
elif is_torch_tensor(_A):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_A):
return np.asarray(_A).tolist()
elif isinstance(_A , (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
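# Example of what the recursive conversion above produces on a nested
# structure: arrays, numpy scalars, lists and dicts all collapse to plain
# Python containers.
import numpy as np

_demo_nested = {"a": np.array([[1, 2], [3, 4]]), "b": [np.float32(0.5), 2]}
# to_py_obj(_demo_nested) -> {"a": [[1, 2], [3, 4]], "b": [0.5, 2]}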
def lowerCamelCase (a_ :Tuple) -> Any:
    if isinstance(_A , (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
elif isinstance(_A , (list, tuple)):
return np.array(_A)
elif is_tf_tensor(_A):
return obj.numpy()
elif is_torch_tensor(_A):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_A):
return np.asarray(_A)
else:
return obj
class __magic_name__ ( lowerCAmelCase__ ):
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Any = fields(self )
# Safety and consistency checks
if not len(_SCREAMING_SNAKE_CASE ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
lowercase :Optional[int] = getattr(self , class_fields[0].name )
lowercase :Dict = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase :int = first_field.items()
lowercase :int = True
else:
try:
lowercase :Dict = iter(_SCREAMING_SNAKE_CASE )
lowercase :Tuple = True
except TypeError:
lowercase :Dict = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_SCREAMING_SNAKE_CASE ):
if (
not isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) )
or not len(_SCREAMING_SNAKE_CASE ) == 2
or not isinstance(element[0] , _SCREAMING_SNAKE_CASE )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowercase :Tuple = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowercase :Union[str, Any] = element[1]
elif first_field is not None:
lowercase :int = first_field
else:
for field in class_fields:
lowercase :Optional[Any] = getattr(self , field.name )
if v is not None:
lowercase :List[Any] = v
def __delitem__( self : Any , *snake_case__ : Tuple , **snake_case__ : Tuple ):
'''simple docstring'''
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def __snake_case ( self : str , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
'''simple docstring'''
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def __snake_case ( self : Optional[int] , *snake_case__ : List[str] , **snake_case__ : List[str] ):
'''simple docstring'''
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def __snake_case ( self : Optional[Any] , *snake_case__ : List[Any] , **snake_case__ : Tuple ):
'''simple docstring'''
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : Optional[Any] , snake_case__ : Optional[int] ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase :Dict = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Tuple , snake_case__ : List[Any] , snake_case__ : str ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
super().__setattr__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __setitem__( self : Dict , snake_case__ : int , snake_case__ : Dict ):
'''simple docstring'''
super().__setitem__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __snake_case ( self : str ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class __magic_name__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
@classmethod
def __snake_case ( cls : Tuple , snake_case__ : Dict ):
'''simple docstring'''
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class __magic_name__ ( lowerCAmelCase__ ):
__A : Any = "longest"
__A : List[Any] = "max_length"
__A : Optional[int] = "do_not_pad"
class __magic_name__ ( lowerCAmelCase__ ):
__A : List[str] = "pt"
__A : Union[str, Any] = "tf"
__A : Optional[int] = "np"
__A : Optional[int] = "jax"
class __magic_name__ :
def __init__( self : Optional[Any] , snake_case__ : Any ):
'''simple docstring'''
lowercase :Tuple = context_managers
lowercase :Optional[int] = ExitStack()
def __enter__( self : Optional[Any] ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(_SCREAMING_SNAKE_CASE )
def __exit__( self : Tuple , *snake_case__ : Any , **snake_case__ : str ):
'''simple docstring'''
self.stack.__exit__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCamelCase (a_ :Optional[int]) -> Optional[Any]:
lowercase :Union[str, Any] = infer_framework(_A)
if framework == "tf":
lowercase :Any = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
lowercase :Union[str, Any] = inspect.signature(model_class.forward) # PyTorch models
else:
lowercase :Optional[int] = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowerCamelCase (a_ :str) -> List[Any]:
lowercase :Dict = model_class.__name__
lowercase :List[Any] = infer_framework(_A)
if framework == "tf":
lowercase :Any = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
lowercase :Optional[int] = inspect.signature(model_class.forward) # PyTorch models
else:
lowercase :int = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowerCamelCase (d :MutableMapping , parent_key :str = "" , delimiter :str = ".") -> Any:
    def _flatten_dict(d , parent_key="" , delimiter="."):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from _flatten_dict(v , key , delimiter=delimiter )
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ))
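# Expected behaviour of the dict flattener above: nested keys are joined
# with the delimiter into a single-level mapping.
_demo_sample = {"model": {"hidden_size": 768, "heads": 12}, "lr": 3e-4}
# flattening _demo_sample gives
# {"model.hidden_size": 768, "model.heads": 12, "lr": 0.0003}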
@contextmanager
def lowerCamelCase (a_ :Dict , a_ :bool = False) -> Union[str, Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCamelCase (a_ :int , a_ :str=None) -> Tuple:
if is_numpy_array(_A):
return np.transpose(_A , axes=_A)
elif is_torch_tensor(_A):
return array.T if axes is None else array.permute(*_A)
elif is_tf_tensor(_A):
import tensorflow as tf
return tf.transpose(_A , perm=_A)
elif is_jax_tensor(_A):
return jnp.transpose(_A , axes=_A)
else:
raise ValueError(F"""Type not supported for transpose: {type(_A)}.""")
def lowerCamelCase (a_ :List[Any] , a_ :Union[str, Any]) -> Optional[Any]:
if is_numpy_array(_A):
return np.reshape(_A , _A)
elif is_torch_tensor(_A):
return array.reshape(*_A)
elif is_tf_tensor(_A):
import tensorflow as tf
return tf.reshape(_A , _A)
elif is_jax_tensor(_A):
return jnp.reshape(_A , _A)
else:
raise ValueError(F"""Type not supported for reshape: {type(_A)}.""")
def lowerCamelCase (a_ :Optional[int] , a_ :Tuple=None) -> int:
if is_numpy_array(_A):
return np.squeeze(_A , axis=_A)
elif is_torch_tensor(_A):
return array.squeeze() if axis is None else array.squeeze(dim=_A)
elif is_tf_tensor(_A):
import tensorflow as tf
return tf.squeeze(_A , axis=_A)
elif is_jax_tensor(_A):
return jnp.squeeze(_A , axis=_A)
else:
raise ValueError(F"""Type not supported for squeeze: {type(_A)}.""")
def lowerCamelCase (a_ :Dict , a_ :Any) -> Tuple:
if is_numpy_array(_A):
return np.expand_dims(_A , _A)
elif is_torch_tensor(_A):
return array.unsqueeze(dim=_A)
elif is_tf_tensor(_A):
import tensorflow as tf
return tf.expand_dims(_A , axis=_A)
elif is_jax_tensor(_A):
return jnp.expand_dims(_A , axis=_A)
else:
raise ValueError(F"""Type not supported for expand_dims: {type(_A)}.""")
def lowerCamelCase (a_ :List[Any]) -> Optional[int]:
if is_numpy_array(_A):
return np.size(_A)
elif is_torch_tensor(_A):
return array.numel()
elif is_tf_tensor(_A):
import tensorflow as tf
return tf.size(_A)
elif is_jax_tensor(_A):
return array.size
else:
raise ValueError(F"""Type not supported for expand_dims: {type(_A)}.""")
def lowerCamelCase (a_ :Union[str, Any] , a_ :str) -> int:
for key, value in auto_map.items():
if isinstance(_A , (tuple, list)):
lowercase :Tuple = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowercase :Optional[Any] = F"""{repo_id}--{value}"""
return auto_map
def lowerCamelCase (a_ :Dict) -> Union[str, Any]:
for base_class in inspect.getmro(_A):
lowercase :int = base_class.__module__
lowercase :Optional[int] = base_class.__name__
if module.startswith('''tensorflow''') or module.startswith('''keras''') or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''') or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''') or module.startswith('''jax''') or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"""Could not infer framework from class {model_class}.""")
| 355
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase (a_ :int) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase () -> Optional[int]:
with parallel_backend('''spark'''):
assert ParallelBackendConfig.backend_name == "spark"
lowercase :Optional[int] = [1, 2, 3]
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=2)
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1])
def lowerCamelCase (a_ :Union[str, Any]) -> Optional[Any]:
lowercase :Optional[Any] = [1, 2]
lowercase :int = {'''a''': 1, '''b''': 2}
lowercase :List[Any] = {'''a''': [1, 2], '''b''': [3, 4]}
lowercase :Optional[int] = {'''a''': {'''1''': 1}, '''b''': 2}
lowercase :List[Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowercase :Optional[int] = [2, 3]
lowercase :Tuple = {'''a''': 2, '''b''': 3}
lowercase :Union[str, Any] = {'''a''': [2, 3], '''b''': [4, 5]}
lowercase :List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
lowercase :Union[str, Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark'''):
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
| 172
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[str] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Tuple = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
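# Sketch of how the (new_name, old_name) pairs built above are consumed
# later in the script: each original checkpoint key is looked up and
# re-inserted under the Hugging Face name. Names below are illustrative
# placeholders, not the script's own identifiers.
from collections import OrderedDict

def _demo_apply_renames(rename_pairs, original_weights):
    new_state_dict = OrderedDict()
    for new_key, old_key in rename_pairs:
        new_state_dict[new_key] = original_weights[old_key]
    return new_state_dict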
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def a_ ( ):
'''simple docstring'''
lowercase__ : List[Any] = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Any = 'imagenet-1k-id2label.json'
lowercase__ : int = 1000
lowercase__ : Any = 'huggingface/label-files'
lowercase__ : Union[str, Any] = num_labels
lowercase__ : str = json.load(open(cached_download(hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) ) , 'r' ) )
    lowercase__ : Dict = {int(k ): v for k, v in id2label.items()}
    lowercase__ : Optional[Any] = id2label
    lowercase__ : List[Any] = {v: k for k, v in id2label.items()}
    lowercase__ : Tuple = CvtConfig(num_labels=_SCREAMING_SNAKE_CASE , id2label=_SCREAMING_SNAKE_CASE , label2id=_SCREAMING_SNAKE_CASE )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
lowercase__ : List[str] = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
lowercase__ : str = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
lowercase__ : Any = [2, 2, 20]
lowercase__ : Optional[Any] = [3, 12, 16]
lowercase__ : Dict = [192, 768, 1024]
lowercase__ : Union[str, Any] = CvtForImageClassification(_SCREAMING_SNAKE_CASE )
lowercase__ : str = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
lowercase__ : Tuple = image_size
lowercase__ : List[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) )
lowercase__ : List[Any] = OrderedDict()
lowercase__ : List[str] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase__ : Dict = list_of_state_dict + cls_token(_SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = list_of_state_dict + embeddings(_SCREAMING_SNAKE_CASE )
for cnt in range(config.depth[idx] ):
lowercase__ : Any = list_of_state_dict + attention(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
lowercase__ : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
    default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 77
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 296
| 0
|
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ :Tuple = '''1'''
lowerCAmelCase__ :Any = '''0'''
lowerCAmelCase__ :int = '''1'''
lowerCAmelCase__ :List[Any] = ort.SessionOptions()
lowerCAmelCase__ :Dict = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ :Dict = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ :Any = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ :List[Any] = ort.RunOptions()
lowerCAmelCase__ :List[Any] = 128
lowerCAmelCase__ :Tuple = 1
lowerCAmelCase__ :Optional[Any] = np.ones((batch, sequence), dtype=np.int64)
lowerCAmelCase__ :Any = np.ones((batch, sequence), dtype=np.int64)
lowerCAmelCase__ :Optional[int] = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ :Optional[Any] = time.time()
lowerCAmelCase__ :str = 2000
lowerCAmelCase__ :int = {}
for iter in range(max_iters):
lowerCAmelCase__ :Optional[Any] = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
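# Optional extension sketch, reusing the session defined above: collect
# per-iteration latencies so tail percentiles can be reported as well as
# the mean.
latencies = []
for _ in range(max_iters):
    t0 = time.time()
    sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
    latencies.append((time.time() - t0) * 1000)
print("p50/p95/p99 (ms):", np.percentile(latencies, [50, 95, 99]))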
| 185
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :List[str] = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Any = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCAmelCase__ :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : str ,_snake_case : List[Any] ,_snake_case : Optional[int]=3 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : int=10 ,_snake_case : List[str]=[10, 20, 30, 40] ,_snake_case : Any=[1, 1, 2, 1] ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Union[str, Any]="relu" ,_snake_case : Dict=3 ,_snake_case : Any=None ,) -> str:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Optional[Any] = embeddings_size
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : str = depths
lowercase__ : Tuple = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Optional[Any] = len(_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
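# With the tester default image_size=32, this checks for an 8x8 map (32 // 4).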
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
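# Exercise both ResNet residual block variants selected via the config's `layer_type`.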
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16
| 1
|
from __future__ import annotations
def UpperCamelCase ( _A ):
"""simple docstring"""
if not nums:
raise ValueError("""List is empty""" )
return sum(_A ) / len(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__: Tuple = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Dict = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: int = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__magic_name__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
| 1
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
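# 'a'..'z' sit exactly 32 code points above 'A'..'Z' in ASCII, so
# chr(ord(char) - 32) maps each lowercase letter to its uppercase form.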
return "".join(chr(ord(_UpperCamelCase ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 57
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Find the widest element so every column can share one fixed-width format specifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # self must be square and match u, v in rows
assert u.column == v.column == 1 # u, v must be column vectors
# Calculate
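# Sherman-Morrison formula, with `self` playing the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)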
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
lowerCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 2, -3
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
| 4
| 0
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __lowercase , __lowercase , unittest.TestCase ):
UpperCAmelCase__ : Optional[int] = IFPipeline
UpperCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return self._get_dummy_components()
def lowerCamelCase ( self : Dict , _snake_case : List[str] , _snake_case : Union[str, Any]=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
self._test_save_load_local()
def lowerCamelCase ( self : int):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa)
UpperCAmelCase_ = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=_snake_case , tokenizer=_snake_case)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
UpperCAmelCase_ = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase_ = None
UpperCAmelCase_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_snake_case , _snake_case , _snake_case , _snake_case)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase_ = IFImgaImgPipeline(**pipe_a.components)
UpperCAmelCase_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_snake_case , _snake_case , _snake_case , _snake_case)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase_ = IFInpaintingPipeline(**pipe_a.components)
UpperCAmelCase_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_snake_case , _snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Optional[int]):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Dict , _snake_case : Union[str, Any]):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : int , _snake_case : Dict):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def A () -> List[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 362
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class __snake_case :
def __init__( self : Optional[int] , _snake_case : Iterable[int]):
"""simple docstring"""
UpperCAmelCase_ = None
for i in sorted(_snake_case , reverse=_snake_case):
UpperCAmelCase_ = Node(_snake_case , self.head)
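# Each insertion happens at the head; iterating the input in descending
# order therefore leaves the linked list sorted in ascending order.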
def __iter__( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__A ) + list(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 7
| 0
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Dict=13 , UpperCAmelCase : str=30 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : int=0.02 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : int=0.6 , UpperCAmelCase : Optional[int]=None , ):
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = mask_ratio
A_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
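# e.g. with the defaults image_size=30, patch_size=2, mask_ratio=0.6:
# num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91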
def __A ( self : List[str] ):
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def __A ( self : int ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ):
A_ = TFViTMAEModel(config=UpperCAmelCase )
A_ = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFViTMAEForPreTraining(UpperCAmelCase )
A_ = model(UpperCAmelCase , training=UpperCAmelCase )
# expected sequence length = num_patches
A_ = (self.image_size // self.patch_size) ** 2
A_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ = 1
A_ = TFViTMAEForPreTraining(UpperCAmelCase )
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(UpperCAmelCase , training=UpperCAmelCase )
A_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self : Any ):
A_ = self.prepare_config_and_inputs()
((A_) , (A_) , (A_)) = config_and_inputs
A_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowerCamelCase : Union[str, Any] = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
_lowerCamelCase : str = False
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[str] = False
_lowerCamelCase : List[str] = False
def __A ( self : List[str] ):
A_ = TFViTMAEModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def __A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __A ( self : List[Any] ):
pass
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , tf.keras.layers.Layer ) )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
A_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def __A ( self : Optional[int] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Optional[int] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase )
def __A ( self : Dict ):
# make the mask reproducible
np.random.seed(2 )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = int((config.image_size // config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , noise=UpperCAmelCase )
A_ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = model(**UpperCAmelCase , noise=UpperCAmelCase )
A_ = outputs_dict[0].numpy()
A_ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __A ( self : Dict ):
# make the mask reproducible
np.random.seed(2 )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = int((config.image_size // config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCAmelCase : str ):
A_ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCAmelCase ):
A_ = v.numpy()
else:
A_ = np.array(UpperCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = prepare_numpy_arrays(UpperCAmelCase )
A_ = model(UpperCAmelCase , noise=UpperCAmelCase )
A_ = model(**UpperCAmelCase , noise=UpperCAmelCase )
self.assert_outputs_same(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ):
# make masks reproducible
np.random.seed(2 )
A_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = tf.constant(UpperCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ = tf_noise
super().check_pt_tf_models(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Any ):
# make mask reproducible
np.random.seed(2 )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCAmelCase )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(UpperCAmelCase , UpperCAmelCase ),)
if isinstance(UpperCAmelCase , UpperCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCAmelCase , "_keras_serializable" , UpperCAmelCase )
}
A_ = int((config.image_size // config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ = tf.convert_to_tensor(UpperCAmelCase )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
A_ = main_layer_class(UpperCAmelCase )
A_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
A_ = tf.keras.Model(UpperCAmelCase , outputs=main_layer(UpperCAmelCase ) )
A_ = model(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(UpperCAmelCase , "keras_model.h5" )
model.save(UpperCAmelCase )
A_ = tf.keras.models.load_model(
UpperCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCAmelCase , tf.keras.Model )
A_ = model(UpperCAmelCase )
self.assert_outputs_same(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Any ):
# make mask reproducible
np.random.seed(2 )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = int((config.image_size // config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , noise=UpperCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
A_ = outputs.last_hidden_state.numpy()
A_ = 0
else:
A_ = outputs.logits.numpy()
A_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = model_class.from_pretrained(UpperCAmelCase )
A_ = model(UpperCAmelCase , noise=UpperCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
A_ = after_outputs["last_hidden_state"].numpy()
A_ = 0
else:
A_ = after_outputs["logits"].numpy()
A_ = 0
A_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase , 1E-5 )
def __A ( self : str ):
# make mask reproducible
np.random.seed(2 )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = int((config.image_size // config.patch_size) ** 2 )
A_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , noise=UpperCAmelCase )
A_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCAmelCase )
A_ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
A_ = model_class.from_config(model.config )
A_ = new_model(UpperCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
A_ = new_model(UpperCAmelCase , noise=UpperCAmelCase )
self.assert_outputs_same(UpperCAmelCase , UpperCAmelCase )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __A ( self : str ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __A ( self : Dict ):
pass
@slow
def __A ( self : Tuple ):
A_ = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(UpperCAmelCase )
def __snake_case ( ):
"""simple docstring"""
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : str ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __A ( self : int ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCAmelCase , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ = ViTMAEConfig()
A_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ = np.random.uniform(size=(1, num_patches) )
# forward pass
A_ = model(**UpperCAmelCase , noise=UpperCAmelCase )
# verify the logits
A_ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
A_ = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 312
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward()
| 312
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:int = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Union[str, Any] = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
SCREAMING_SNAKE_CASE__:str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 365
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:str = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """sew-d"""
def __init__( self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase=2 , lowerCamelCase=512 , lowerCamelCase=256 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=("p2c", "c2p") , lowerCamelCase="layer_norm" , lowerCamelCase="gelu_python" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1E-7 , lowerCamelCase=1E-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase="mean" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
__a = hidden_size
__a = feat_extract_norm
__a = feat_extract_activation
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = list(lowerCamelCase )
__a = conv_bias
__a = num_conv_pos_embeddings
__a = num_conv_pos_embedding_groups
__a = len(self.conv_dim )
__a = num_hidden_layers
__a = intermediate_size
__a = squeeze_factor
__a = max_position_embeddings
__a = position_buckets
__a = share_att_key
__a = relative_attention
__a = norm_rel_ebd
__a = list(lowerCamelCase )
__a = hidden_act
__a = num_attention_heads
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = feat_proj_dropout
__a = final_dropout
__a = layer_norm_eps
__a = feature_layer_norm_eps
__a = initializer_range
__a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__a = apply_spec_augment
__a = mask_time_prob
__a = mask_time_length
__a = mask_time_min_masks
__a = mask_feature_prob
__a = mask_feature_length
__a = mask_feature_min_masks
# ctc loss
__a = ctc_loss_reduction
__a = ctc_zero_infinity
# sequence classification
__a = use_weighted_layer_sum
__a = classifier_proj_size
@property
def a__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 268
| 0
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCAmelCase_ ( __lowerCamelCase : Any="ro" ,__lowerCamelCase : Any="en" ,__lowerCamelCase : Any="wmt16" ,__lowerCamelCase : Dict=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase_ :str = F'{src_lang}-{tgt_lang}'
print(F'Converting {dataset}-{pair}' )
lowercase_ :Tuple = datasets.load_dataset(__lowerCamelCase ,__lowerCamelCase )
if save_dir is None:
lowercase_ :List[str] = F'{dataset}-{pair}'
lowercase_ :int = Path(__lowerCamelCase )
save_dir.mkdir(exist_ok=__lowerCamelCase )
for split in ds.keys():
print(F'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
lowercase_ :Any = "val" if split == "validation" else split
lowercase_ :int = save_dir.joinpath(F'{fn}.source' )
lowercase_ :int = save_dir.joinpath(F'{fn}.target' )
lowercase_ :Optional[int] = src_path.open("w+" )
lowercase_ :Optional[int] = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase_ :Optional[int] = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 223
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : List[str] ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCAmelCase : Optional[int] ={
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase : List[Any] ={
'''RUCAIBox/mvp''': 1_024,
}
class a_ ( _lowerCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ["input_ids", "attention_mask"]
__A = MvpTokenizer
def __init__( self : Optional[Any] , lowercase : Any=None , lowercase : List[Any]=None , lowercase : Dict=None , lowercase : int="replace" , lowercase : int="<s>" , lowercase : List[str]="</s>" , lowercase : Optional[Any]="</s>" , lowercase : List[str]="<s>" , lowercase : List[str]="<unk>" , lowercase : List[str]="<pad>" , lowercase : Tuple="<mask>" , lowercase : Tuple=False , lowercase : Dict=True , **lowercase : List[str] , ):
"""simple docstring"""
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
lowercase_ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase ) != add_prefix_space:
lowercase_ :List[str] = getattr(lowercase , pre_tok_state.pop("type" ) )
lowercase_ :int = add_prefix_space
lowercase_ :Optional[int] = pre_tok_class(**lowercase )
lowercase_ :Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
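# The post-processor must be kept in sync too: if `add_prefix_space` or
# `trim_offsets` differ from the serialized state, it is rebuilt below.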
lowercase_ :List[Any] = "post_processor"
lowercase_ :str = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
lowercase_ :Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ :int = tuple(state["sep"] )
if "cls" in state:
lowercase_ :Any = tuple(state["cls"] )
lowercase_ :int = False
if state.get("add_prefix_space" , lowercase ) != add_prefix_space:
lowercase_ :Union[str, Any] = add_prefix_space
lowercase_ :int = True
if state.get("trim_offsets" , lowercase ) != trim_offsets:
lowercase_ :Any = trim_offsets
lowercase_ :int = True
if changes_to_apply:
lowercase_ :Tuple = getattr(lowercase , state.pop("type" ) )
lowercase_ :Any = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : int , lowercase : Dict ):
"""simple docstring"""
lowercase_ :List[str] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
lowercase_ :Union[str, Any] = value
def lowercase__ ( self : Optional[Any] , *lowercase : List[Any] , **lowercase : Any ):
"""simple docstring"""
lowercase_ :Any = kwargs.get("is_split_into_words" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowercase , **lowercase )
def lowercase__ ( self : Optional[Any] , *lowercase : Optional[int] , **lowercase : int ):
"""simple docstring"""
lowercase_ :Any = kwargs.get("is_split_into_words" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowercase , **lowercase )
def lowercase__ ( self : Dict , lowercase : str , lowercase : Optional[str] = None ):
"""simple docstring"""
lowercase_ :str = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def lowercase__ ( self : Tuple , lowercase : Dict , lowercase : int=None ):
"""simple docstring"""
lowercase_ :List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
"""simple docstring"""
lowercase_ :Union[str, Any] = [self.sep_token_id]
lowercase_ :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 223
| 1
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , _UpperCAmelCase )
_UpperCAmelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
_UpperCAmelCase = dataset_size < in_memory_max_size
else:
_UpperCAmelCase = False
_UpperCAmelCase = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 290
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
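# `_import_structure` maps submodule names to the symbols they export;
# `_LazyModule` below defers the heavy imports until first attribute access.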
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290
| 1
|
from __future__ import annotations
import math
def UpperCAmelCase__ ( _A : int , _A : int , _A : bool , _A : list[int] , _A : float ):
'''simple docstring'''
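# The game tree is stored as a flat array of leaf scores: node `i` has
# children `2 * i` and `2 * i + 1`, and max/min levels alternate with depth.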
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , )
)
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =[90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
a__ =math.log(len(lowerCAmelCase_ ) , 2 )
print(F"""Optimal value : {minimax(0 , 0 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 188
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
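# Hypothetical usage sketch (the checkpoint download and a local scan are assumed;
# PipelineTool instances are callable, chaining encode -> forward -> decode):
# from PIL import Image
# tool = DocumentQuestionAnsweringTool()
# answer = tool(Image.open("invoice.png"), "What is the total amount?")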
| 233
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
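# Direct-call sketch, equivalent to the CLI above (paths are placeholders, and a
# local fairseq HuBERT checkpoint is assumed to exist):
# convert_hubert_checkpoint(
#     "./hubert_base_ls960.pt", "./hubert-base", is_finetuned=False
# )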
| 72
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32,
                 intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
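# Quick sanity check of the defaults above (values mirror facebook/xlm-roberta-xl):
# config = XLMRobertaXLConfig()
# assert config.hidden_size == 2560 and config.num_hidden_layers == 36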
| 72
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))
    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))
    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))
    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))
    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))
    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))
    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))
    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))
    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
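# To run just this module (path assumes the usual transformers test layout):
# python -m pytest tests/utils/test_generic.py -q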
| 304
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True,
    validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
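# Hedged usage sketch for the deprecated loader above (downloads the four MNIST
# archives into train_dir on first call):
# mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
# batch_images, batch_labels = mnist.train.next_batch(100)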
| 138
| 0
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
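# Direct-call sketch, equivalent to invoking this script through fire on the command
# line (file names are placeholders; extra kwargs are forwarded to calculate_rouge):
# metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge.json")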
| 171
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
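# Direct-call sketch (placeholder paths; a TF XLNet checkpoint is assumed on disk):
# convert_xlnet_checkpoint_to_pytorch(
#     "./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt",
#     "./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json",
#     "./xlnet-large-cased", finetuning_task="sts-b",
# )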
| 171
| 1