from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
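# Usage sketch (assuming the standard transformers package layout): the lazy
# module keeps importing `transformers` cheap, and the torch-backed submodule
# is only materialized on first attribute access.
#
#     from transformers.models.trocr import TrOCRConfig  # resolves via _LazyModule
#     config = TrOCRConfig()  # default TrOCR decoder configuration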
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTeX.\n    references: list of references, one for each prediction. Each\n        reference is a string that contains natural language\n        and LaTeX.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing predictions and references."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
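# The canonicalization is delegated to the hendrycks/math helper; a minimal
# sketch of the underlying call (assuming math_equivalence is installed):
#
#     math_equivalence.is_equiv("1/2", "\\frac{1}{2}")  # True: both canonicalize the same way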
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they contain the same characters, arranged
    differently (ignoring case and whitespace).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each position, increment the count for the character in the first
    # string and decrement it for the character in the second.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
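# Example calls (the per-character counts cancel out exactly when the two
# strings contain the same multiset of characters):
#
#     check_anagrams("Silent", "Listen")                      # True
#     check_anagrams("This is a string", "Is this a string")  # True
#     check_anagrams("There", "Their")                        # False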
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images built from random uint8 NumPy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
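# Usage sketch distilled from the tests above: the processor bundles a
# tokenizer and an image processor behind a single call (variables as built
# in setUp).
#
#     processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     inputs = processor(text="lower newer", images=image_input, return_tensors="np")
#     # inputs.keys() -> input_ids, token_type_ids, attention_mask, pixel_values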
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def a__ ( lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False ) -> str:
UpperCAmelCase__ : Union[str, Any] = """backbone.""" if is_semantic else """"""
UpperCAmelCase__ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", """beit.embeddings.cls_token"""),
(F"""{prefix}patch_embed.proj.weight""", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"""{prefix}patch_embed.proj.bias""", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"""{prefix}pos_embed""", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False ) -> int:
for i in range(config.num_hidden_layers ):
UpperCAmelCase__ : int = """backbone.""" if is_semantic else """"""
# queries, keys and values
UpperCAmelCase__ : Tuple = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
UpperCAmelCase__ : Any = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
UpperCAmelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase__ : Dict = q_bias
UpperCAmelCase__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ : int = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ : Optional[Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase__ : Optional[Any] = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
UpperCAmelCase__ : str = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
UpperCAmelCase__ : Tuple = gamma_a
UpperCAmelCase__ : int = gamma_a
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : Tuple = dct.pop(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = val
def a__ ( ) -> List[Any]:
UpperCAmelCase__ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ : Tuple = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> int:
UpperCAmelCase__ : str = False if """rvlcdip""" in checkpoint_url else True
UpperCAmelCase__ : List[Any] = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase , use_mask_token=lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase__ : List[Any] = 10_24
UpperCAmelCase__ : List[Any] = 40_96
UpperCAmelCase__ : str = 24
UpperCAmelCase__ : str = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase__ : List[Any] = 16
UpperCAmelCase__ : List[Any] = """huggingface/label-files"""
UpperCAmelCase__ : int = """rvlcdip-id2label.json"""
UpperCAmelCase__ : str = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase__ : Union[str, Any] = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase__ : str = idalabel
UpperCAmelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" )["""model"""]
UpperCAmelCase__ : str = create_rename_keys(lowerCAmelCase , has_lm_head=lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase , has_lm_head=lowerCAmelCase )
# load HuggingFace model
UpperCAmelCase__ : Dict = BeitForMaskedImageModeling(lowerCAmelCase ) if has_lm_head else BeitForImageClassification(lowerCAmelCase )
model.eval()
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image
UpperCAmelCase__ : Dict = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase )
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : List[Any] = image_processor(images=lowerCAmelCase , return_tensors="""pt""" )
UpperCAmelCase__ : Optional[Any] = encoding["""pixel_values"""]
UpperCAmelCase__ : Tuple = model(lowerCAmelCase )
UpperCAmelCase__ : List[str] = outputs.logits
# verify logits
UpperCAmelCase__ : Any = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(lowerCAmelCase ), "Shape of logits not as expected"
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase )
if push_to_hub:
if has_lm_head:
UpperCAmelCase__ : Dict = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
UpperCAmelCase__ : Union[str, Any] = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase , )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
_A = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
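# Sketch of classifying a document image with a converted rvlcdip checkpoint
# (the dump folder is whatever --pytorch_dump_folder_path pointed to):
#
#     image_processor = BeitImageProcessor.from_pretrained(pytorch_dump_folder_path)
#     model = BeitForImageClassification.from_pretrained(pytorch_dump_folder_path)
#     logits = model(**image_processor(images=prepare_img(), return_tensors="pt")).logits
#     print(model.config.id2label[logits.argmax(-1).item()])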
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_A = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 10_00,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_A = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 10_00,
"""block_out_channels""": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_A = {
"""sample_size""": 2_56,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_A = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
_A = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
_A = {
"""num_train_timesteps""": 1_51,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def a__ ( lowerCAmelCase ) -> Tuple:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> List[str]:
UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase__ : Optional[int] = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase__ : Optional[Any] = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase__ : Optional[int] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase__ : List[Any] = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase__ : Dict = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase__ : Union[str, Any] = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase__ : Dict = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase__ : List[Any] = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase__ : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Optional[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Any = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Any = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : Optional[Any] = torch.load(lowerCAmelCase , map_location="""cpu""" )
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : List[Any] = checkpoint["""time_embed.0.weight"""]
UpperCAmelCase__ : str = checkpoint["""time_embed.0.bias"""]
UpperCAmelCase__ : List[str] = checkpoint["""time_embed.2.weight"""]
UpperCAmelCase__ : Dict = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase__ : Dict = checkpoint["""label_emb.weight"""]
UpperCAmelCase__ : str = checkpoint["""input_blocks.0.0.weight"""]
UpperCAmelCase__ : List[str] = checkpoint["""input_blocks.0.0.bias"""]
UpperCAmelCase__ : List[str] = unet_config["""down_block_types"""]
UpperCAmelCase__ : Tuple = unet_config["""layers_per_block"""]
UpperCAmelCase__ : int = unet_config["""attention_head_dim"""]
UpperCAmelCase__ : Union[str, Any] = unet_config["""block_out_channels"""]
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Union[str, Any] = channels_list[0]
for i, layer_type in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = channels_list[i]
UpperCAmelCase__ : int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : List[Any] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase__ : Dict = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Optional[Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : Any = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : Optional[Any] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase__ : int = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
UpperCAmelCase__ : Dict = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase__ : int = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase__ : Union[str, Any] = convert_attention(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : Any = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase__ : List[str] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase__ : Tuple = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
UpperCAmelCase__ : Tuple = current_channels
# hardcoded the mid-block for now
UpperCAmelCase__ : List[Any] = """mid_block.resnets.0"""
UpperCAmelCase__ : str = """middle_block.0"""
UpperCAmelCase__ : List[str] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[str] = """mid_block.attentions.0"""
UpperCAmelCase__ : Any = """middle_block.1"""
UpperCAmelCase__ : Optional[int] = convert_attention(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = """mid_block.resnets.1"""
UpperCAmelCase__ : Tuple = """middle_block.2"""
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Dict = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowerCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : Optional[Any] = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase__ : Dict = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : List[str] = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase__ : Any = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : List[str] = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase__ : Dict = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase__ : Any = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase__ : List[str] = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase__ : Dict = convert_attention(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase__ : int = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = checkpoint["""out.0.weight"""]
UpperCAmelCase__ : List[Any] = checkpoint["""out.0.bias"""]
UpperCAmelCase__ : Tuple = checkpoint["""out.2.weight"""]
UpperCAmelCase__ : Optional[Any] = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
_A = parser.parse_args()
_A = strabool(args.class_cond)
_A = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_A = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_A = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_A = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_A = None
_A = con_pt_to_diffuser(args.unet_path, unet_config)
_A = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_A = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_A = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_A = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
_A = CMStochasticIterativeScheduler(**scheduler_config)
_A = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
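# After conversion, the saved pipeline can be reloaded and sampled from; a
# sketch, assuming the single-step sampling that consistency models support:
#
#     pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
#     image = pipe(num_inference_steps=1).images[0]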
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve F = (ℏ · c · π² · A) / (240 · d⁴) for the missing quantity: exactly
    one of force, area, or distance must be passed as 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
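# Worked example: two 4 m² plates held 1 µm apart. With the constants above,
# F = (ℏ · c · π² · A) / (240 · d⁴) ≈ 5.2e-3 N:
#
#     casimir_force(force=0, area=4.0, distance=1e-6)  # {'force': 0.0052...}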
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Search amazon.in for the given product and return title, link, price,
    rating, MRP and discount for each result as a pandas DataFrame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out MRP and Discount when the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
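# The scraper works for any search term; a quick way to eyeball the rows
# before writing the CSV:
#
#     df = get_amazon_product_data("smartwatches")
#     print(df.head())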
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowercase ( self : Optional[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
lowerCAmelCase = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
lowerCAmelCase = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
lowerCAmelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
lowerCAmelCase = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
lowerCAmelCase = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
lowerCAmelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
lowerCAmelCase = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def __lowercase ( self : str ):
lowerCAmelCase = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
lowerCAmelCase = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
@slow
@require_torch
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(lowerCAmelCase )
@slow
@require_tf
def __lowercase ( self : str ):
lowerCAmelCase = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(lowerCAmelCase )
def __lowercase ( self : List[str] , lowerCAmelCase : int ):
lowerCAmelCase = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
lowerCAmelCase = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
lowerCAmelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def __lowercase ( self : Any ):
lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
lowerCAmelCase = None
lowerCAmelCase = None
self.run_pipeline_test(lowerCAmelCase , [] )
@require_tf
def __lowercase ( self : Any ):
lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
lowerCAmelCase = None
lowerCAmelCase = None
self.run_pipeline_test(lowerCAmelCase , [] )
def __lowercase ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
lowerCAmelCase = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def __lowercase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ):
lowerCAmelCase = fill_masker.tokenizer
lowerCAmelCase = fill_masker.model
lowerCAmelCase = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
lowerCAmelCase , [
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
] , )
lowerCAmelCase = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
lowerCAmelCase , [
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
] , )
lowerCAmelCase = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
lowerCAmelCase , [
[
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
],
[
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
],
] , )
with self.assertRaises(lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowerCAmelCase ):
fill_masker("""This is""" )
self.run_test_top_k(lowerCAmelCase , lowerCAmelCase )
self.run_test_targets(lowerCAmelCase , lowerCAmelCase )
self.run_test_top_k_targets(lowerCAmelCase , lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase , lowerCAmelCase )
self.fill_mask_with_multiple_masks(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
lowerCAmelCase = tokenizer.get_vocab()
lowerCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase , targets=lowerCAmelCase )
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase , [
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
] , )
lowerCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , lowerCAmelCase )
lowerCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(lowerCAmelCase ) )
# Call argument
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
] , )
lowerCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , lowerCAmelCase )
lowerCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(lowerCAmelCase ) )
# Score equivalence
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase )
lowerCAmelCase = [top_mask["""token_str"""] for top_mask in outputs]
lowerCAmelCase = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ) == set(lowerCAmelCase ):
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase )
lowerCAmelCase = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCAmelCase ) , nested_simplify(lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(lowerCAmelCase ):
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCAmelCase ):
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(lowerCAmelCase ):
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="""""" )
def __lowercase ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : str ):
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase , top_k=2 )
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase , [
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
] , )
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowerCAmelCase , [
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(lowerCAmelCase ) , nested_simplify(lowerCAmelCase ) )
def __lowercase ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : Any ):
lowerCAmelCase = tokenizer.get_vocab()
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
# top_k=2, ntargets=3
lowerCAmelCase = sorted(vocab.keys() )[:3]
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
lowerCAmelCase = [el["""token_str"""] for el in sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["score"] , reverse=lowerCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ).issubset(lowerCAmelCase ):
lowerCAmelCase = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowerCAmelCase ) , nested_simplify(lowerCAmelCase ) )
def __lowercase ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Dict ):
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
lowerCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCAmelCase = sorted(vocab.keys() )[:3]
lowerCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCAmelCase = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=lowerCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowerCAmelCase ) , 3 )
def __lowercase ( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] ):
lowerCAmelCase = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
lowerCAmelCase = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowerCAmelCase , [
[
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
],
[
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
],
[
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
{"""sequence""": ANY(lowerCAmelCase ), """score""": ANY(lowerCAmelCase ), """token""": ANY(lowerCAmelCase ), """token_str""": ANY(lowerCAmelCase )},
],
] , )
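# Minimal usage sketch of the pipeline exercised above (tiny test model, runs
# on CPU):
#
#     unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#     unmasker("My name is <mask>")  # two scored candidate completions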
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
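# A minimal sketch of the LocalSGD idea used below (names match the accelerate
# API; the step count of 8 is an arbitrary illustration): each process
# optimizes locally and parameters are averaged across processes every
# `local_sgd_steps` steps.
#
#     with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
#         for batch in train_dataloader:
#             ...  # forward, backward, optimizer step
#             local_sgd.step()  # counts local steps; synchronizes on every 8th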
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train and validation `DataLoader`s for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
def lowercase () -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=snake_case__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=snake_case__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 155 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def lowercase ( _snake_case : str , _snake_case : str ) ->Any:
"""simple docstring"""
__snake_case : int = RobertaPreLayerNormConfig.from_pretrained(
_snake_case , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
__snake_case : Optional[int] = torch.load(hf_hub_download(repo_id=_snake_case , filename='''pytorch_model.bin''' ) )
__snake_case : Optional[int] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
__snake_case : Union[str, Any] = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
__snake_case : Tuple = tensor_value
__snake_case : List[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_snake_case , config=_snake_case , state_dict=_snake_case )
model.save_pretrained(_snake_case )
# convert tokenizer
__snake_case : int = AutoTokenizer.from_pretrained(_snake_case )
tokenizer.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 357 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]:
"""simple docstring"""
def get_masked_lm_array(_snake_case : str ):
__snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : str = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Any = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_array(_snake_case : str ):
__snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_layer_array(_snake_case : int , _snake_case : str ):
__snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ):
__snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case )
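        # the TF checkpoint stores attention projections with separate per-head dimensions,
        # so reshape to the flat torch parameter shape before (for kernels) transposing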
__snake_case : int = array.reshape(_snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
print(f"""Loading model based on config from {config_path}...""" )
__snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case )
__snake_case : Dict = BertForMaskedLM(_snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 0 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
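# Entropy of the softmax distribution over logits x, computed via the identity
# H(softmax(x)) = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i))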
def entropy ( UpperCamelCase__ ):
    exp_x = torch.exp(UpperCamelCase__ )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(UpperCamelCase__ * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class a__ ( nn.Module ):
def __init__( self : Union[str, Any] , a : List[Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = config.output_attentions
__lowerCamelCase = config.output_hidden_states
__lowerCamelCase = nn.ModuleList([BertLayer(a ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase = nn.ModuleList([BertHighway(a ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase = [-1 for _ in range(config.num_hidden_layers )]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : str ):
"""simple docstring"""
if (type(a ) is float) or (type(a ) is int):
for i in range(len(self.early_exit_entropy ) ):
__lowerCamelCase = x
else:
__lowerCamelCase = x
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Optional[int] , a : Tuple=None , a : Optional[Any]=None , a : str=None , a : str=None , ):
"""simple docstring"""
__lowerCamelCase = ()
__lowerCamelCase = ()
__lowerCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowerCamelCase = all_hidden_states + (hidden_states,)
__lowerCamelCase = layer_module(
a , a , head_mask[i] , a , a )
__lowerCamelCase = layer_outputs[0]
if self.output_attentions:
__lowerCamelCase = all_attentions + (layer_outputs[1],)
__lowerCamelCase = (hidden_states,)
if self.output_hidden_states:
__lowerCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase = current_outputs + (all_attentions,)
__lowerCamelCase = self.highway[i](a )
# logits, pooled_output
if not self.training:
__lowerCamelCase = highway_exit[0]
__lowerCamelCase = entropy(a )
__lowerCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowerCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowerCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a , i + 1 )
else:
__lowerCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowerCamelCase = all_hidden_states + (hidden_states,)
__lowerCamelCase = (hidden_states,)
if self.output_hidden_states:
__lowerCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase = outputs + (all_attentions,)
__lowerCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , UpperCAmelCase__ , )
class a__ ( UpperCAmelCase__ ):
def __init__( self : List[Any] , a : Optional[Any] ):
"""simple docstring"""
super().__init__(a )
__lowerCamelCase = config
__lowerCamelCase = BertEmbeddings(a )
__lowerCamelCase = DeeBertEncoder(a )
__lowerCamelCase = BertPooler(a )
self.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Dict ):
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a )
@add_start_docstrings_to_model_forward(a )
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[Any]=None , a : Union[str, Any]=None , a : Optional[int]=None , a : Optional[int]=None , a : Any=None , a : Optional[Any]=None , a : int=None , a : Optional[int]=None , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowerCamelCase = input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowerCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase = torch.ones(a , device=a )
if encoder_attention_mask is None:
__lowerCamelCase = torch.ones(a , device=a )
if token_type_ids is None:
__lowerCamelCase = torch.zeros(a , dtype=torch.long , device=a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase = self.get_extended_attention_mask(a , a , a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCamelCase = encoder_attention_mask[:, None, None, :]
__lowerCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowerCamelCase = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase = self.get_head_mask(a , self.config.num_hidden_layers )
__lowerCamelCase = self.embeddings(
input_ids=a , position_ids=a , token_type_ids=a , inputs_embeds=a )
__lowerCamelCase = self.encoder(
a , attention_mask=a , head_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
__lowerCamelCase = encoder_outputs[0]
__lowerCamelCase = self.pooler(a )
__lowerCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class a__ ( UpperCAmelCase__ ):
def __init__( self : str , a : Dict , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = message
__lowerCamelCase = exit_layer # start from 1!
class a__ ( nn.Module ):
def __init__( self : Any , a : List[Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = BertPooler(a )
__lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase = nn.Linear(config.hidden_size , config.num_labels )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int ):
"""simple docstring"""
__lowerCamelCase = encoder_outputs[0]
__lowerCamelCase = self.pooler(a )
# "return" pooler_output
# BertModel
__lowerCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__lowerCamelCase = bmodel_output[1]
__lowerCamelCase = self.dropout(a )
__lowerCamelCase = self.classifier(a )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , UpperCAmelCase__ , )
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : Tuple ):
"""simple docstring"""
super().__init__(a )
__lowerCamelCase = config.num_labels
__lowerCamelCase = config.num_hidden_layers
__lowerCamelCase = DeeBertModel(a )
__lowerCamelCase = nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : int=None , a : List[Any]=None , a : Optional[Any]=None , a : List[str]=None , a : List[str]=None , a : Tuple=None , a : List[str]=None , a : Any=-1 , a : List[Any]=False , ):
"""simple docstring"""
__lowerCamelCase = self.num_layers
try:
__lowerCamelCase = self.bert(
a , attention_mask=a , token_type_ids=a , position_ids=a , head_mask=a , inputs_embeds=a , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCamelCase = outputs[1]
__lowerCamelCase = self.dropout(a )
__lowerCamelCase = self.classifier(a )
__lowerCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCamelCase = e.message
__lowerCamelCase = e.exit_layer
__lowerCamelCase = outputs[0]
if not self.training:
__lowerCamelCase = entropy(a )
__lowerCamelCase = []
__lowerCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase = []
for highway_exit in outputs[-1]:
__lowerCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase = MSELoss()
__lowerCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase = CrossEntropyLoss()
__lowerCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a )
if train_highway:
__lowerCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase = (loss,) + outputs
if not self.training:
__lowerCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 67 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
    unet : UNetaDModel
    scheduler : ScoreSdeVeScheduler
    def __init__( self : Union[str, Any] , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler ) -> int:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self : Optional[int] , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
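        # score SDE-VE sampling alternates corrector steps (annealed Langevin dynamics at the
        # current noise level) with a predictor step (a reverse-diffusion update)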
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 23 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config : Optional[int] , base_model : bool=False ):
"""simple docstring"""
    rename_keys =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys =[(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict : Any , config : List[Any] , base_model : bool=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__a =''
else:
__a ='vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
        in_proj_bias =state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
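        # the fused qkv weight has shape (3 * hidden_size, hidden_size): rows [0:h] hold the
        # query projection, rows [h:2h] the key projection and rows [2h:3h] the value projection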
__a =in_proj_weight[
: config.hidden_size, :
]
__a =in_proj_bias[: config.hidden_size]
__a =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a =in_proj_weight[
-config.hidden_size :, :
]
__a =in_proj_bias[-config.hidden_size :]
def UpperCamelCase_( state_dict : Optional[Any] ):
    """simple docstring"""
    ignore_keys =['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict : List[Any] ):
"""simple docstring"""
    ignore_keys =[
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct : Optional[Any] , old : Union[str, Any] , new : Optional[int] ):
    """simple docstring"""
    val =dct.pop(old )
    dct[new] =val
def convert_vit_msn_checkpoint( checkpoint_url : Optional[Any] , pytorch_dump_folder_path : Tuple ):
"""simple docstring"""
__a =ViTMSNConfig()
__a =1000
__a ='datasets/huggingface/label-files'
__a ='imagenet-1k-id2label.json'
__a =json.load(open(hf_hub_download(_snake_case , _snake_case ) , 'r' ) )
__a ={int(_snake_case ): v for k, v in idalabel.items()}
__a =idalabel
__a ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__a =384
__a =1536
__a =6
elif "l16" in checkpoint_url:
__a =1024
__a =4096
__a =24
__a =16
__a =0.1
elif "b4" in checkpoint_url:
__a =4
elif "l7" in checkpoint_url:
__a =7
__a =1024
__a =4096
__a =24
__a =16
__a =0.1
__a =ViTMSNModel(_snake_case )
__a =torch.hub.load_state_dict_from_url(_snake_case , map_location='cpu' )['target_encoder']
__a =ViTImageProcessor(size=config.image_size )
remove_projection_head(_snake_case )
__a =create_rename_keys(_snake_case , base_model=_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
read_in_q_k_v(_snake_case , _snake_case , base_model=_snake_case )
model.load_state_dict(_snake_case )
model.eval()
__a ='http://images.cocodataset.org/val2017/000000039769.jpg'
__a =Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
__a =ViTImageProcessor(
size=config.image_size , image_mean=_snake_case , image_std=_snake_case )
__a =image_processor(images=_snake_case , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
__a =model(**_snake_case )
__a =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__a =torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
__a =torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
__a =torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
__a =torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
__a =torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _snake_case , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 308 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(_snake_case ) )
    __a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 308 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : int = R"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class lowerCAmelCase ( __UpperCamelCase ):
@add_start_docstrings(UpperCAmelCase )
def __call__( self : Optional[Any] , UpperCAmelCase : torch.LongTensor , UpperCAmelCase : torch.FloatTensor , **UpperCAmelCase : List[Any] ) -> bool:
raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] = None ) -> Optional[Any]:
lowerCamelCase__ : List[str] = max_length
lowerCamelCase__ : Optional[Any] = max_position_embeddings
@add_start_docstrings(UpperCAmelCase )
def __call__( self : Optional[Any] , UpperCAmelCase : torch.LongTensor , UpperCAmelCase : torch.FloatTensor , **UpperCAmelCase : Optional[Any] ) -> bool:
lowerCamelCase__ : Optional[int] = input_ids.shape[-1]
lowerCamelCase__ : int = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
'exceptions, performance degradation, or nothing at all.' )
return is_done
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int ) -> Dict:
warnings.warn(
'The class `MaxNewTokensCriteria` is deprecated. '
F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
'with `max_length = start_length + max_new_tokens` instead.' , UpperCAmelCase , )
lowerCamelCase__ : Dict = start_length
lowerCamelCase__ : str = max_new_tokens
lowerCamelCase__ : Optional[Any] = start_length + max_new_tokens
@add_start_docstrings(UpperCAmelCase )
def __call__( self : List[Any] , UpperCAmelCase : torch.LongTensor , UpperCAmelCase : torch.FloatTensor , **UpperCAmelCase : Optional[Any] ) -> bool:
return input_ids.shape[-1] >= self.max_length
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , UpperCAmelCase : float , UpperCAmelCase : Optional[float] = None ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = max_time
lowerCamelCase__ : int = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(UpperCAmelCase )
def __call__( self : int , UpperCAmelCase : torch.LongTensor , UpperCAmelCase : torch.FloatTensor , **UpperCAmelCase : str ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class lowerCAmelCase ( __UpperCamelCase ):
@add_start_docstrings(UpperCAmelCase )
def __call__( self : Dict , UpperCAmelCase : torch.LongTensor , UpperCAmelCase : torch.FloatTensor , **UpperCAmelCase : int ) -> bool:
return any(criteria(UpperCAmelCase , UpperCAmelCase ) for criteria in self )
@property
def A_ ( self : str ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
return stopping_criterium.max_length
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
return stopping_criterium.max_length
return None
def SCREAMING_SNAKE_CASE ( stopping_criteria , max_length ) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
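# A minimal, hypothetical usage sketch (`StoppingCriteriaList` and `MaxLengthCriteria` are the
# upstream transformers names referenced above, not definitions local to this file):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   criteria = SCREAMING_SNAKE_CASE(criteria, max_length=20)  # warn on mismatch, or append a length criterion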
| 50 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a__ = logging.get_logger(__name__)
def rename_key ( __a : str ) -> str:
    """simple docstring"""
    pattern : str = R'''\w+[.]\d+'''
    pats : list = re.findall(pattern ,__a )
    for pat in pats:
        __a = __a.replace(pat ,'''_'''.join(pat.split('''.''' ) ) )
    return __a
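# e.g. rename_key('''encoder.blocks.0.weight''') returns '''encoder.blocks_0.weight'''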
def rename_key_and_reshape_tensor ( pt_tuple_key : tuple ,pt_tensor ,random_flax_state_dict : dict ) -> tuple:
    """simple docstring"""
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 ,3 ,1 ,0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def __UpperCAmelCase ( pt_state_dict : dict ,flax_model ,seed : int=42 ) -> dict:
    """simple docstring"""
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(seed ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key ,pt_tensor ,random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
| 235 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_lowercase : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__( DeiTImageProcessor ):
def __init__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : List[Any] )-> None:
"""simple docstring"""
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''' , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
| 91 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
_lowercase : Tuple = namedtuple("""CoinsDistribResult""", """moves excess""")
def lowerCamelCase__ ( A : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(A : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(A : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(A ) != count_coins(A ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
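    # Each call returns (moves, excess); the parent then sends `1 - excess` coins across the
    # edge so that each child subtree keeps exactly one coin per node, and every coin crossing
    # an edge costs one move, hence the abs() terms below.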
def get_distrib(A : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
return get_distrib(A )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91 | 1 |
'''simple docstring'''
def _A (number :int ) -> bool:
    '''simple docstring'''
    if not isinstance(number , int ):
        _a = f'Input value of [number={number}] must be an integer'
        raise TypeError(_a )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
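# Example: _A(76) is True because 76**2 == 5776 ends in the digits 76 (an automorphic number),
# while _A(7) is False because 7**2 == 49 does not end in 7.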
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a_ : str = logging.get_logger(__name__)
a_ : Tuple = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """layoutlmv3"""
def __init__( self , __magic_name__=5_02_65 , __magic_name__=7_68 , __magic_name__=12 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=1e-5 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=10_24 , __magic_name__=1_28 , __magic_name__=1_28 , __magic_name__=True , __magic_name__=32 , __magic_name__=1_28 , __magic_name__=64 , __magic_name__=2_56 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=2_24 , __magic_name__=3 , __magic_name__=16 , __magic_name__=None , **__magic_name__ , ) -> Dict:
super().__init__(
vocab_size=__magic_name__ , hidden_size=__magic_name__ , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , intermediate_size=__magic_name__ , hidden_act=__magic_name__ , hidden_dropout_prob=__magic_name__ , attention_probs_dropout_prob=__magic_name__ , max_position_embeddings=__magic_name__ , type_vocab_size=__magic_name__ , initializer_range=__magic_name__ , layer_norm_eps=__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
_a = max_ad_position_embeddings
_a = coordinate_size
_a = shape_size
_a = has_relative_attention_bias
_a = rel_pos_bins
_a = max_rel_pos
_a = has_spatial_attention_bias
_a = rel_ad_pos_bins
_a = max_rel_ad_pos
_a = text_embed
_a = visual_embed
_a = input_size
_a = num_channels
_a = patch_size
_a = classifier_dropout
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = version.parse("""1.12""" )
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def __UpperCAmelCase ( self ) -> int:
return 12
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 3 , __magic_name__ = 40 , __magic_name__ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , __magic_name__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = processor.tokenizer.num_special_tokens_to_add(__magic_name__ )
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
        # Generate dummy inputs according to the computed batch and sequence lengths
_a = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_a = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_a = self._generate_dummy_images(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = dict(
processor(
__magic_name__ , text=__magic_name__ , boxes=__magic_name__ , return_tensors=__magic_name__ , ) )
return inputs
| 168 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowercase__ ( TaskTemplate ):
    '''simple docstring'''
    task : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    text_column : str = "text"
    summary_column : str = "summary"
@property
def lowercase__ ( self : List[Any] ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
| 241 |
"""simple docstring"""
def a__ ( n = 2000000 ):
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
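# For the default n = 2000000 this returns 142913828922, the sum of all primes below
# two million (Project Euler problem 10).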
if __name__ == "__main__":
print(F"{solution() = }")
| 241 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __snake_case ( TaskTemplate ):
    task : str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema : ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema : ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
    audio_column : str = "audio"
    transcription_column : str = "transcription"
    def lowerCamelCase ( self : Union[str, Any] , features : Tuple):
        """simple docstring"""
        if self.audio_column not in features:
            raise ValueError(F"""Column {self.audio_column} is not present in features.""")
        if not isinstance(features[self.audio_column] , Audio):
            raise ValueError(F"""Column {self.audio_column} is not an Audio type.""")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema['''audio'''] = features[self.audio_column]
        task_template.__dict__['''input_schema'''] = input_schema
        return task_template
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 51 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __snake_case ( a , a , a , unittest.TestCase ):
    UpperCAmelCase__ : List[Any] = StableDiffusionControlNetImg2ImgPipeline
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
        UpperCAmelCase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0)
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0)
UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : Dict=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = 2
UpperCAmelCase_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , )
UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(_snake_case)).to(_snake_case)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uint8(_snake_case)).convert('''RGB''').resize((64, 64))
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase ( self : Any):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : Any):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __snake_case ( a , a , unittest.TestCase ):
    UpperCAmelCase__ : str = StableDiffusionControlNetImg2ImgPipeline
UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : str = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase ( self : str):
"""simple docstring"""
torch.manual_seed(0)
        UpperCAmelCase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0)
        def init_weights(m):
            # normal-init conv weights, constant bias; applied to the
            # controlnet_down_blocks right after each ControlNet is built below
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case)
torch.manual_seed(0)
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case)
torch.manual_seed(0)
UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = MultiControlNetModel([controlneta, controlneta])
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase ( self : int , _snake_case : Union[str, Any] , _snake_case : str=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = 2
UpperCAmelCase_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
]
UpperCAmelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case)).to(_snake_case)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uint8(_snake_case)).convert('''RGB''').resize((64, 64))
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_snake_case)
pipe.to(_snake_case)
UpperCAmelCase_ = 1_0.0
UpperCAmelCase_ = 4
UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**_snake_case)[0]
UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
UpperCAmelCase_ = steps
UpperCAmelCase_ = scale
UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a)) > 1e-3
assert np.sum(np.abs(output_a - output_a)) > 1e-3
assert np.sum(np.abs(output_a - output_a)) > 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : int):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def lowerCamelCase ( self : int):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_snake_case)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_snake_case)
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        UpperCAmelCase_ = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case , controlnet=_snake_case)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = '''evil space-punk bird'''
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512))
UpperCAmelCase_ = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512))
UpperCAmelCase_ = pipe(
_snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''')
assert np.abs(expected_image - image).max() < 9e-2
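For reference, the tensor-to-PIL round-trip used by the dummy-input helpers above, as a self-contained sketch. Shapes are chosen to match the tests; the tests themselves skip the 0-255 scaling because pixel content is irrelevant for a shape-level smoke test:
import numpy as np
import torch
from PIL import Image

image = torch.rand(1, 3, 32, 32)                    # NCHW float tensor in [0, 1)
array = image.cpu().permute(0, 2, 3, 1)[0].numpy()  # NCHW -> HWC, drop batch dim
pil = Image.fromarray(np.uint8(array * 255)).convert("RGB").resize((64, 64))
print(pil.size)  # (64, 64)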
| 51 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
                    '''example_id''': tf.int32,
                    '''input_ids''': tf.int32,
                    '''attention_mask''': tf.int32,
                    '''token_type_ids''': tf.int32,
                },
                tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
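The RoBERTa/BART label swap above compensates for checkpoints that were trained with a different MNLI label order. A tiny self-contained illustration of the remapping (labels as returned by get_labels above):
label_list = ["contradiction", "entailment", "neutral"]
# RoBERTa-family MNLI checkpoints expect positions 1 and 2 swapped,
# so the processor swaps them before building the label -> index map.
label_list[1], label_list[2] = label_list[2], label_list[1]
label_map = {label: i for i, label in enumerate(label_list)}
print(label_map)  # {'contradiction': 0, 'neutral': 1, 'entailment': 2}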
| 4 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCamelCase_ (a__, a__ ):
"""simple docstring"""
_lowerCAmelCase = 'swin'
_lowerCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
A_ : Optional[int] = image_size
A_ : Optional[int] = patch_size
A_ : Optional[int] = num_channels
A_ : Any = embed_dim
A_ : List[Any] = depths
A_ : Any = len(_lowerCamelCase )
A_ : List[Any] = num_heads
A_ : Tuple = window_size
A_ : Tuple = mlp_ratio
A_ : Dict = qkv_bias
A_ : List[str] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : Any = drop_path_rate
A_ : List[Any] = hidden_act
A_ : Tuple = use_absolute_embeddings
A_ : int = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) )
A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )]
A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
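The hidden_size derivation in the Swin config above, worked out with its default values (the channel width doubles at each downsampling stage):
embed_dim = 96            # config default
depths = [2, 2, 6, 2]     # config default; one entry per stage
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768: 96 -> 192 -> 384 -> 768 across the four stages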
| 4 | 1 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''env''')
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''')
    parser.add_argument(
        '''--config_file''' , default=None , help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    '''simple docstring'''
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
        '''PyTorch XPU available''': str(pt_xpu_available),
        '''PyTorch NPU available''': str(pt_npu_available),
        '''System RAM''': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''')
    print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()]))
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''')
    accelerate_config_str = (
        '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F"""\t{accelerate_config}"""
    )
    print(accelerate_config_str)
    info['''`Accelerate` configs'''] = accelerate_config
    return info
def main() -> int:
    '''simple docstring'''
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
    raise SystemExit(main())
| 308 |
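A sketch of the kind of system facts the env command above reports, gathered directly with the same stdlib calls (torch is omitted here so the snippet runs anywhere):
import platform
import numpy as np

info = {
    "Platform": platform.platform(),
    "Python version": platform.python_version(),
    "Numpy version": np.__version__,
}
print("\n".join(f"- {prop}: {val}" for prop, val in info.items()))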
"""simple docstring"""
def solution(n: int = 1000) -> int:
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 315 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: 0-weight edges go to the front of the deque, 1-weight to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
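A usage sketch for the 0-1 BFS above (method names as restored): because 0-weight edges are explored from the front of the deque, the reported distance counts only the 1-weight edges on the best path.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 2, 0)
print(g.get_shortest_path(0, 2))  # 1 (either route uses exactly one 1-weight edge)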
| 355 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention; does not transpose."""
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer; does not transpose."""
    if split_mlp_wi:
        # v1.1 gated GeLU has two input projections (the original read wi_1 into
        # the same variable twice, dropping wi_0)
        wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameter of a layer."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
lowerCAmelCase_ : List[str] = traverse_util.flatten_dict(variables["""target"""] )
lowerCAmelCase_ : List[Any] = {"""/""".join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ : Dict = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase__ )
lowerCAmelCase_ : Optional[Any] = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ : Tuple = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" )
lowerCAmelCase_ : Optional[int] = layer_norm
lowerCAmelCase_ : Optional[int] = k.T
lowerCAmelCase_ : List[Any] = o.T
lowerCAmelCase_ : Union[str, Any] = q.T
lowerCAmelCase_ : Any = v.T
# Block i, layer 1 (MLP).
lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ )
lowerCAmelCase_ : str = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : Optional[int] = wi[0].T
lowerCAmelCase_ : Optional[Any] = wi[1].T
else:
lowerCAmelCase_ : int = wi.T
lowerCAmelCase_ : Optional[Any] = wo.T
lowerCAmelCase_ : Tuple = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowerCAmelCase_ : str = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" )
lowerCAmelCase_ : Dict = layer_norm
lowerCAmelCase_ : Union[str, Any] = k.T
lowerCAmelCase_ : Union[str, Any] = o.T
lowerCAmelCase_ : Any = q.T
lowerCAmelCase_ : Tuple = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" )
lowerCAmelCase_ : Optional[int] = layer_norm
lowerCAmelCase_ : Any = k.T
lowerCAmelCase_ : Any = o.T
lowerCAmelCase_ : Optional[int] = q.T
lowerCAmelCase_ : Dict = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ : List[str] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ : int = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ )
lowerCAmelCase_ : Any = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : List[str] = wi[0].T
lowerCAmelCase_ : List[Any] = wi[1].T
else:
lowerCAmelCase_ : Optional[Any] = wi.T
lowerCAmelCase_ : str = wo.T
lowerCAmelCase_ : int = old["""decoder/decoder_norm/scale"""]
lowerCAmelCase_ : Union[str, Any] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ : Optional[Any] = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
lowerCAmelCase_ : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : List[Any] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : Union[str, Any] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCAmelCase_ : List[str] = state_dict["""shared.weight"""]
return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the model's parameters with the T5X converted ones."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
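An illustrative invocation of the converter above; the script filename and all paths are placeholders, not values from the snippet:
python convert_t5x_checkpoint_to_pytorch.py \
    --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
    --config_file /path/to/t5/config.json \
    --pytorch_dump_path /path/to/output_dir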
| 28 | 0 |
'''simple docstring'''
class TrieNode:
    """A trie node, mapping each child character to the next TrieNode."""

    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
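A quick interactive sketch of the trie above, showing that deleting a word keeps longer words that share its prefix:
root = TrieNode()
root.insert_many(["band", "bandana"])
print(root.find("band"))     # True
root.delete("band")
print(root.find("band"))     # False
print(root.find("bandana"))  # True - the shared prefix survives the deletion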
| 55 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
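A cross-check of generate_all_combinations against itertools; both produce the k-subsets of 1..n in lexicographic order:
from itertools import combinations

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]
print(generate_all_combinations(4, 2))  # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]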
| 315 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Any=13 , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Dict=False , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=99 , _SCREAMING_SNAKE_CASE: str=32 , _SCREAMING_SNAKE_CASE: str=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: Optional[int]=64 , _SCREAMING_SNAKE_CASE: str="gelu" , _SCREAMING_SNAKE_CASE: Tuple=0.1 , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: Tuple=512 , _SCREAMING_SNAKE_CASE: Union[str, Any]=16 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Union[str, Any]=3 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[int]=2 , _SCREAMING_SNAKE_CASE: Optional[int]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Dict=4 , _SCREAMING_SNAKE_CASE: int=1 , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
UpperCamelCase_ = q_groups
UpperCamelCase_ = k_groups
UpperCamelCase_ = v_groups
UpperCamelCase_ = post_attention_groups
UpperCamelCase_ = intermediate_groups
UpperCamelCase_ = output_groups
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCamelCase : str = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Any = False
_UpperCamelCase : List[Any] = True
_UpperCamelCase : int = False
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37 )
def lowercase ( self: Dict ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: int ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def lowercase ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase ( self: Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
UpperCamelCase_ = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )[0]
UpperCamelCase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 371 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            # round half up using the fractional part of `last`
            # (the original computed `last % last`, which is always 0)
            self.rem = last % 1
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 328 | 0 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # not a leap year: not divisible by 4, or a century year not divisible by 400
        # (the original tested `(year % 400) == 0` here, inverting the leap rule)
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
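Two spot checks for the corrected Doomsday implementation above, using dates with well-known weekdays:
print(get_week_day(2020, 10, 24))  # Saturday
print(get_week_day(2000, 1, 1))    # Saturday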
| 309 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)

        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
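# A minimal standalone sketch of the same inference flow outside the test harness
# (checkpoint name taken from the integration test above; everything else is
# illustrative, not part of the original module):
#
#     from PIL import Image
#     import torch
#     from transformers import ViTImageProcessor, ViTMSNForImageClassification
#
#     processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#     model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     with torch.no_grad():
#         logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(logits.argmax(-1).item())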
| 309 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
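
# For a symmetric block matrix M = [[A, B], [B.T, C]], S = C - B.T @ A^{-1} @ B is
# the Schur complement of A in M. It satisfies det(M) = det(A) * det(S), which is
# exactly what the first unit test below verifies numerically.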
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 338 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 338 | 1 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from `nums` without picking adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
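
# Worked example (values checked by hand against the recurrence above): for
# [1, 2, 3] the best non-adjacent choice is 1 + 3 = 4, and for
# [1, 5, 3, 7, 2, 2, 6] it is 5 + 7 + 6 = 18 (indices 1, 3 and 6 are pairwise
# non-adjacent).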
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
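
    # With the tester defaults above (min_size = max_size = 32 * 8 = 256), the //4
    # encoder compression means the predicted masks come out at 64 x 64.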
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 103 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels should also work on subclasses
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 102 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
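        # e.g. with the defaults above (image_size=30, patch_size=2) that is
        # (30 // 2) ** 2 = 225 patches, hence seq_length = 227 once the [CLS] and
        # distillation tokens are prepended.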
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 102 | 1 |
def solution(n: int = 10) -> str:
    """Return the last `n` digits of 28433 * 2^7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
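
# pow(2, 7830457, modulus) performs modular exponentiation, so only the last `n`
# digits are ever materialised instead of the full ~2.4-million-digit number;
# solution(10) yields "8739992577", the classic Project Euler 97 answer.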
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 124 |
import re
def dna(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
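
# Example: dna("ATCG") returns "TAGC" (A <-> T, C <-> G), while a strand containing
# any character outside {A, T, C, G}, e.g. "ATCX", raises ValueError("Invalid Strand").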
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 353 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
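
        # The x4 upscaler quadruples the spatial resolution, which is why the 64x64
        # low-res input above yields a (1, 256, 256, 3) output
        # (expected_height_width = 64 * 4).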
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 204 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # the caller already passed {"image": ..., "question": ...} (or a list of such dicts)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 99 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: str, src_lang: str, tgt_lang: str) -> None:
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 68 | 0 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
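
# Illustrative values: get_distance(0, 0, 50) never diverges and returns 1.0, while a
# point far outside the set such as get_distance(2, 2, 50) escapes on the first step
# and returns 0.0.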
def get_black_and_white_rgb(distance: float) -> tuple:
    """Points inside the Mandelbrot set are black, all others white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Color-code the relative distance by mapping it onto the HSV color space."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 350 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def a__ ( __lowercase , __lowercase , __lowercase = 1_6000 ) -> List[str]:
_A = int(round(sample_rate * max_length ) )
if len(__lowercase ) <= sample_length:
return wav
_A = randint(0 , len(__lowercase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
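# e.g. max_length=20 at the default sample_rate of 16_000 crops long clips to a
# random window of 20 * 16_000 = 320_000 samples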
@dataclass
class snake_case :
__UpperCamelCase = field(default=_UpperCamelCase , metadata={'help': 'Name of a dataset from the datasets package'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'A file containing the training audio paths and labels.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'A file containing the validation audio paths and labels.'})
__UpperCamelCase = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
__UpperCamelCase = field(
default='validation' , metadata={
'help': (
'The name of the evaluation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
__UpperCamelCase = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
__UpperCamelCase = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__UpperCamelCase = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class snake_case :
__UpperCamelCase = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
__UpperCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , a__ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def a__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--label_column_name` to the correct text column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_A = feature_extractor.model_input_names[0]
def train_transforms(__lowercase ):
_A = []
for audio in batch[data_args.audio_column_name]:
_A = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowercase )
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowercase ):
_A = [audio["array"] for audio in batch[data_args.audio_column_name]]
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_A = raw_datasets["train"].features[data_args.label_column_name].names
_A , _A = {}, {}
for i, label in enumerate(__lowercase ):
_A = str(__lowercase )
_A = label
# Load the accuracy metric from the datasets package
_A = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowercase ):
_A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowercase , references=eval_pred.label_ids )
_A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , label2id=__lowercase , id2label=__lowercase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
_A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics("eval" , __lowercase )
trainer.save_metrics("eval" , __lowercase )
# Write model card and (optionally) push to hub
_A = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main()
| 163 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( __lowercase ):
def __get__( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=None ) -> str:
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
lowerCAmelCase = '__cached_' + self.fget.__name__
lowerCAmelCase = getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if cached is None:
lowerCAmelCase = self.fget(__UpperCAmelCase )
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return cached
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
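# e.g. the truth-value parser maps "YES" -> 1 and "off" -> 0; any other string raises ValueError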
def a_ ( lowerCamelCase : str ):
if is_torch_fx_proxy(lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase : Optional[Any] ):
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase : Tuple ):
return _is_numpy(lowerCamelCase )
def a_ ( lowerCamelCase : Dict ):
import torch
return isinstance(lowerCamelCase , torch.Tensor )
def a_ ( lowerCamelCase : Tuple ):
return False if not is_torch_available() else _is_torch(lowerCamelCase )
def a_ ( lowerCamelCase : List[str] ):
import torch
return isinstance(lowerCamelCase , torch.device )
def a_ ( lowerCamelCase : str ):
return False if not is_torch_available() else _is_torch_device(lowerCamelCase )
def a_ ( lowerCamelCase : Any ):
import torch
if isinstance(lowerCamelCase , str ): # a dtype may be given by name, e.g. "float32"
if hasattr(torch , lowerCamelCase ):
lowerCAmelCase = getattr(torch , lowerCamelCase )
else:
return False
return isinstance(lowerCamelCase , torch.dtype )
def a_ ( lowerCamelCase : Union[str, Any] ):
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
import tensorflow as tf
return isinstance(lowerCamelCase , tf.Tensor )
def a_ ( lowerCamelCase : Optional[Any] ):
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowerCamelCase )
return type(lowerCamelCase ) == tf.Tensor
def a_ ( lowerCamelCase : str ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase , jnp.ndarray )
def a_ ( lowerCamelCase : List[str] ):
return False if not is_flax_available() else _is_jax(lowerCamelCase )
def a_ ( lowerCamelCase : Tuple ):
if isinstance(lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(lowerCamelCase ) for k, v in obj.items()}
elif isinstance(lowerCamelCase , (list, tuple) ):
return [to_py_obj(lowerCamelCase ) for o in obj]
elif is_tf_tensor(lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase ):
return np.asarray(lowerCamelCase ).tolist()
elif isinstance(lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
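# e.g. a torch tensor [[1, 2]] becomes the nested list [[1, 2]], and np.int64(3) becomes the plain int 3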
def a_ ( lowerCamelCase : Dict ):
if isinstance(lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(lowerCamelCase ) for k, v in obj.items()}
elif isinstance(lowerCamelCase , (list, tuple) ):
return np.array(lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase ):
return np.asarray(lowerCamelCase )
else:
return obj
class UpperCAmelCase_ ( __lowercase ):
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = fields(self )
# Safety and consistency checks
if not len(__UpperCAmelCase ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
lowerCAmelCase = getattr(self , class_fields[0].name )
lowerCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase = first_field.items()
lowerCAmelCase = True
else:
try:
lowerCAmelCase = iter(__UpperCAmelCase )
lowerCAmelCase = True
except TypeError:
lowerCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__UpperCAmelCase ):
if (
not isinstance(__UpperCAmelCase , (list, tuple) )
or not len(__UpperCAmelCase ) == 2
or not isinstance(element[0] , __UpperCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCAmelCase = element[1]
elif first_field is not None:
lowerCAmelCase = first_field
else:
for field in class_fields:
lowerCAmelCase = getattr(self , field.name )
if v is not None:
lowerCAmelCase = v
def __delitem__( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __UpperCAmelCase ( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : int ) -> Optional[Any]:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int] ) -> str:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __UpperCAmelCase ( self : Optional[int] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> List[Any]:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Optional[Any] , UpperCAmelCase__ : int ) -> Union[str, Any]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) -> Any:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__UpperCAmelCase , __UpperCAmelCase )
super().__setattr__(__UpperCAmelCase , __UpperCAmelCase )
def __setitem__( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
super().__setitem__(__UpperCAmelCase , __UpperCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__UpperCAmelCase , __UpperCAmelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( __lowercase , __lowercase ):
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , UpperCAmelCase__ : Dict ) -> List[str]:
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''longest'''
lowerCamelCase : Optional[int] = '''max_length'''
lowerCamelCase : Optional[int] = '''do_not_pad'''
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''pt'''
lowerCamelCase : Tuple = '''tf'''
lowerCamelCase : Tuple = '''np'''
lowerCamelCase : Dict = '''jax'''
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : List[ContextManager] ) -> Optional[Any]:
lowerCAmelCase = context_managers
lowerCAmelCase = ExitStack()
def __enter__( self : str ) -> Dict:
for context_manager in self.context_managers:
self.stack.enter_context(__UpperCAmelCase )
def __exit__( self : int , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
self.stack.__exit__(*__UpperCAmelCase , **__UpperCAmelCase )
def a_ ( lowerCamelCase : List[Any] ):
lowerCAmelCase = infer_framework(lowerCamelCase )
if framework == "tf":
lowerCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a_ ( lowerCamelCase : Union[str, Any] ):
lowerCAmelCase = model_class.__name__
lowerCAmelCase = infer_framework(lowerCamelCase )
if framework == "tf":
lowerCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a_ ( lowerCamelCase : MutableMapping , lowerCamelCase : str = "" , lowerCamelCase : str = "." ):
def _flatten_dict(lowerCamelCase : Optional[Any] , lowerCamelCase : int="" , lowerCamelCase : List[str]="." ):
for k, v in d.items():
lowerCAmelCase = str(lowerCamelCase ) + delimiter + str(lowerCamelCase ) if parent_key else k
if v and isinstance(lowerCamelCase , lowerCamelCase ):
yield from flatten_dict(lowerCamelCase , lowerCamelCase , delimiter=lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
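# intended behavior: {"a": {"b": 1}} flattens to {"a.b": 1}, nested keys joined by the delimiter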
@contextmanager
def a_ ( lowerCamelCase : Any , lowerCamelCase : bool = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : List[str]=None ):
if is_numpy_array(lowerCamelCase ):
return np.transpose(lowerCamelCase , axes=lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.T if axes is None else array.permute(*lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.transpose(lowerCamelCase , perm=lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.transpose(lowerCamelCase , axes=lowerCamelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[int] ):
if is_numpy_array(lowerCamelCase ):
return np.reshape(lowerCamelCase , lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.reshape(*lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.reshape(lowerCamelCase , lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.reshape(lowerCamelCase , lowerCamelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase : Dict , lowerCamelCase : int=None ):
if is_numpy_array(lowerCamelCase ):
return np.squeeze(lowerCamelCase , axis=lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(lowerCamelCase , axis=lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.squeeze(lowerCamelCase , axis=lowerCamelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Optional[int] ):
if is_numpy_array(lowerCamelCase ):
return np.expand_dims(lowerCamelCase , lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.unsqueeze(dim=lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(lowerCamelCase , axis=lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.expand_dims(lowerCamelCase , axis=lowerCamelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase : List[Any] ):
if is_numpy_array(lowerCamelCase ):
return np.size(lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.numel()
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.size(lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for tensor_size: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : int ):
for key, value in auto_map.items():
if isinstance(lowerCamelCase , (tuple, list) ):
lowerCAmelCase = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCAmelCase = f'''{repo_id}--{value}'''
return auto_map
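# intended behavior: with repo_id "user/repo", {"AutoModel": "modeling.MyModel"} becomes
# {"AutoModel": "user/repo--modeling.MyModel"}; values that already contain "--" are kept unchanged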
def a_ ( lowerCamelCase : str ):
for base_class in inspect.getmro(lowerCamelCase ):
lowerCAmelCase = base_class.__module__
lowerCAmelCase = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''llama'''
__snake_case = ['''past_key_values''']
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any]=32_000 , __UpperCAmelCase : str=4_096 , __UpperCAmelCase : int=11_008 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Union[str, Any]="silu" , __UpperCAmelCase : Tuple=2_048 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=1e-6 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=0 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Tuple , ) ->str:
"""simple docstring"""
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = intermediate_size
a = num_hidden_layers
a = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
a = num_attention_heads
a = num_key_value_heads
a = hidden_act
a = initializer_range
a = rms_norm_eps
a = pretraining_tp
a = use_cache
a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
F"""got {self.rope_scaling}""" )
a = self.rope_scaling.get('''type''' , __UpperCAmelCase )
a = self.rope_scaling.get('''factor''' , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ) ) )
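# e.g. the distance between (0, 0) and (3, 4) is sqrt(3**2 + 4**2) == 5.0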
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if dataset.ndim != value_array.ndim:
A__ = (
'Input data have different dimensions: '
F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(UpperCamelCase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
A__ = (
'Input data have different shapes: '
F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(UpperCamelCase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
A__ = (
'Input data have different datatypes: '
F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(UpperCamelCase__ )
A__ = []
for value in value_array:
A__ = euclidean(UpperCamelCase__ , dataset[0] )
A__ = dataset[0].tolist()
for dataset_value in dataset[1:]:
A__ = euclidean(UpperCamelCase__ , UpperCamelCase__ )
if dist > temp_dist:
A__ = temp_dist
A__ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return np.dot(UpperCamelCase__ , UpperCamelCase__ ) / (norm(UpperCamelCase__ ) * norm(UpperCamelCase__ ))
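# e.g. two vectors pointing the same way, such as (1, 0) and (2, 0), have cosine similarity 1.0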
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | """simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(R"digital_image_processing/image_data/lena_small.jpg")
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = cn.convert_to_negative(UpperCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase ( ):
"""simple docstring"""
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCamelCase__ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A__ = canny.canny(UpperCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase ( ):
"""simple docstring"""
assert gg.gaussian_filter(UpperCamelCase__ , 5 , sigma=0.9 ).all()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
A__ = conv.img_convolve(UpperCamelCase__ , UpperCamelCase__ ).astype(uint8 )
assert res.any()
def UpperCAmelCase ( ):
"""simple docstring"""
assert med.median_filter(UpperCamelCase__ , 3 ).any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ , A__ = sob.sobel_filter(UpperCamelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = sp.make_sepia(UpperCamelCase__ , 20 )
assert sepia.all()
def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
A__ = bs.Burkes(imread(UpperCamelCase__ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
A__ = rs.NearestNeighbour(imread(UpperCamelCase__ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
A__ = imread(UpperCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A__ = 0
A__ = 0
A__ = image[x_coordinate][y_coordinate]
A__ = lbp.get_neighbors_pixel(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A__ = lbp.local_binary_value(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert lbp_image.any()
| 154 | 1 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
if not head:
return True
# split the list to two parts
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = head.next, head
while fast and fast.next:
__lowerCAmelCase : Any = fast.next.next
__lowerCAmelCase : Any = slow.next
__lowerCAmelCase : Optional[Any] = slow.next
__lowerCAmelCase : Tuple = None # detach the first half from the second (the check still works without this)
# reverse the second part
__lowerCAmelCase : Union[str, Any] = None
while second:
__lowerCAmelCase : List[Any] = second.next
__lowerCAmelCase : Optional[Any] = node
__lowerCAmelCase : List[str] = second
__lowerCAmelCase : int = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
__lowerCAmelCase : Any = node.next
__lowerCAmelCase : Any = head.next
return True
def __lowerCAmelCase (_UpperCamelCase ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = head, head # slow and fast pointers both start at head
while fast and fast.next:
__lowerCAmelCase , __lowerCAmelCase : List[str] = fast.next.next, slow.next
# 2. Push the second half into the stack
__lowerCAmelCase : Tuple = [slow.val]
while slow.next:
__lowerCAmelCase : Dict = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
__lowerCAmelCase : Tuple = cur.next
return True
def __lowerCAmelCase (_UpperCamelCase ):
if not head or not head.next:
return True
__lowerCAmelCase : Tuple = {}
__lowerCAmelCase : Optional[Any] = 0
while head:
if head.val in d:
d[head.val].append(_UpperCamelCase )
else:
__lowerCAmelCase : Dict = [pos]
__lowerCAmelCase : int = head.next
pos += 1
__lowerCAmelCase : Union[str, Any] = pos - 1
__lowerCAmelCase : List[Any] = 0
for v in d.values():
if len(_UpperCamelCase ) % 2 != 0:
middle += 1
else:
__lowerCAmelCase : List[str] = 0
for i in range(0 , len(_UpperCamelCase ) ):
if v[i] + v[len(_UpperCamelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 86 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 86 | 1 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__UpperCamelCase : int = 10
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : int ):
for i in range(_UpperCAmelCase , _UpperCAmelCase ):
if array[i] == target:
return i
return -1
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : int ):
lowerCAmelCase = 0
lowerCAmelCase = len(_UpperCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (left + right) // 3 + 1
lowerCAmelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase = one_third - 1
elif array[two_third] < target:
lowerCAmelCase = two_third + 1
else:
lowerCAmelCase = one_third + 1
lowerCAmelCase = two_third - 1
else:
return -1
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : int ):
if left < right:
if right - left < precision:
return lin_search(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (left + right) // 3 + 1
lowerCAmelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_UpperCAmelCase , one_third - 1 , _UpperCAmelCase , _UpperCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _UpperCAmelCase , _UpperCAmelCase )
else:
return -1
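# e.g. searching for 7 in [1, 3, 5, 7, 9] returns index 3 with either variant; for such short
# lists (shorter than the precision of 10) both fall straight through to the linear search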
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
__UpperCamelCase : Any = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__UpperCamelCase : List[Any] = int(input('''Enter the number to be found in the list:\n''').strip())
__UpperCamelCase : Any = ite_ternary_search(collection, target)
__UpperCamelCase : List[Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 359 |
"""simple docstring"""
import os
from collections.abc import Iterator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ):
lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ):
return F'{i * " "}*' if i else "\n##"
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ):
lowerCAmelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ):
lowerCAmelCase = ''
for filepath in sorted(good_file_paths(_UpperCAmelCase ) ):
lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase )
if filepath != old_path:
lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' )
lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 309 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : int = (DPMSolverSinglestepScheduler,)
lowerCAmelCase_ : Optional[int] = (("num_inference_steps", 25),)
def lowerCAmelCase__ ( self , **a__ ) -> int:
'''simple docstring'''
snake_case_ = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**a__ )
return config
def lowerCAmelCase__ ( self , a__=0 , **a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop("num_inference_steps" , a__ )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config(**a__ )
snake_case_ = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
snake_case_ = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ , snake_case_ = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
snake_case_ = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
snake_case_ = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self , a__=0 , **a__ ) -> Any:
'''simple docstring'''
snake_case_ = dict(self.forward_default_kwargs )
snake_case_ = kwargs.pop("num_inference_steps" , a__ )
snake_case_ = self.dummy_sample
snake_case_ = 0.1 * sample
snake_case_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case_ = self.get_scheduler_config()
snake_case_ = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
snake_case_ = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
snake_case_ = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase__ ( self , a__=None , **a__ ) -> List[str]:
'''simple docstring'''
if scheduler is None:
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(**a__ )
snake_case_ = scheduler_class(**a__ )
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(**a__ )
snake_case_ = scheduler_class(**a__ )
snake_case_ = 10
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ = model(a__ , a__ )
snake_case_ = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case_ = 50
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
snake_case_ = model(a__ , a__ )
snake_case_ = scheduler.step(a__ , a__ , a__ ).prev_sample
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=a__ )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case_ = self.full_loop(scheduler=a__ )
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
snake_case_ = DEISMultistepScheduler.from_config(scheduler.config )
snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case_ = self.full_loop(scheduler=a__ )
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type="dpmsolver++" , solver_order=a__ , solver_type=a__ , )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
snake_case_ = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type="learned_range" )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = self.full_loop()
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.full_loop(use_karras_sigmas=a__ )
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.full_loop(prediction_type="v_prediction" )
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=a__ )
snake_case_ = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.scheduler_classes[0]
snake_case_ = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
snake_case_ = scheduler_class(**a__ )
snake_case_ = 10
snake_case_ = self.dummy_model()
snake_case_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ = model(a__ , a__ )
snake_case_ = scheduler.step(a__ , a__ , a__ ).prev_sample
assert sample.dtype == torch.float16
| 85 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =ConvBertTokenizer
def __init__( self : List[str] , a : Union[str, Any]=None , a : Optional[int]=None , a : int=True , a : Tuple="[UNK]" , a : Dict="[SEP]" , a : Dict="[PAD]" , a : List[Any]="[CLS]" , a : Tuple="[MASK]" , a : Dict=True , a : Optional[Any]=None , **a : str , ) -> Dict:
"""simple docstring"""
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
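        # If the serialized normalizer state disagrees with the requested options, rebuild it so they take effect.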
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a ) != do_lower_case
or normalizer_state.get("strip_accents" , a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : List[str] = getattr(a , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : Any = strip_accents
SCREAMING_SNAKE_CASE : Optional[int] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**a )
SCREAMING_SNAKE_CASE : str = do_lower_case
def __UpperCamelCase ( self : Union[str, Any] , a : List[Any] , a : int=None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Tuple , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(a , name=a )
return tuple(a ) | 76 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
# Load configuration defined in the metadata file
with open(lowercase__ ) as metadata_file:
A = json.load(lowercase__ )
A = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
A = torch.load(lowercase__ , map_location="cpu" )
# Load the entity vocab file
A = load_entity_vocab(lowercase__ )
A = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A = AddedToken("<ent>" , lstrip=lowercase__ , rstrip=lowercase__ )
A = AddedToken("<ent2>" , lstrip=lowercase__ , rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
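    # Two marker tokens were just added, so grow the word vocabulary by two.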
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
A = LukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
A = state_dict["embeddings.word_embeddings.weight"]
A = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A = F"""encoder.layer.{layer_index}.attention.self."""
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
A = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A = state_dict["entity_embeddings.entity_embeddings.weight"]
A = entity_emb[entity_vocab["[MASK]"]]
A = LukeModel(config=lowercase__ ).eval()
A , A = model.load_state_dict(lowercase__ , strict=lowercase__ )
if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
A = LukeTokenizer.from_pretrained(lowercase__ , task="entity_classification" )
A = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
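    # Character span (39, 42) selects the pronoun "she" in the sentence above.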
A = (39, 42)
A = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors="pt" )
A = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
A = torch.Size((1, 42, 1_024) )
A = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
A = torch.Size((1, 42, 768) )
A = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A = torch.Size((1, 1, 1_024) )
A = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
A = torch.Size((1, 1, 768) )
A = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = {}
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(lowercase__ ):
A , A = line.rstrip().split("\t" )
A = index
return entity_vocab
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__A : Union[str, Any] = get_logger(__name__)
class __UpperCamelCase ( enum.Enum ):
SCREAMING_SNAKE_CASE = "all_checks"
SCREAMING_SNAKE_CASE = "basic_checks"
SCREAMING_SNAKE_CASE = "no_checks"
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
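    # Compare the URL sets in both directions: missing downloads and unexpected extra files are distinct errors.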
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowercase__ ) - set(lowercase__ ) ) )
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowercase__ ) - set(lowercase__ ) ) )
A = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
A = " for " + verification_name if verification_name is not None else ""
if len(lowercase__ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
class __UpperCamelCase ( _A ):
pass
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
"""simple docstring"""
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise ExpectedMoreSplits(str(set(lowercase__ ) - set(lowercase__ ) ) )
if len(set(lowercase__ ) - set(lowercase__ ) ) > 0:
raise UnexpectedSplits(str(set(lowercase__ ) - set(lowercase__ ) ) )
A = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowercase__ ) > 0:
raise NonMatchingSplitsSizesError(str(lowercase__ ) )
logger.info("All the splits matched successfully." )
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ = True ):
"""simple docstring"""
if record_checksum:
A = shaaaa()
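        # Hash in 1 MiB (1 << 20 byte) chunks so arbitrarily large files are processed in constant memory.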
with open(lowercase__ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(lowercase__ )
A = m.hexdigest()
else:
A = None
return {"num_bytes": os.path.getsize(lowercase__ ), "checksum": checksum}
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 57 | 1 |
import string
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
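    # Brute force: try all 26 shift keys and print each candidate plaintext.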
for key in range(len(string.ascii_uppercase ) ):
__lowerCamelCase = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__lowerCamelCase = string.ascii_uppercase.find(snake_case__ )
__lowerCamelCase = num - key
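                # Wrap around the alphabet when the shifted index goes negative.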
if num < 0:
__lowerCamelCase = num + len(string.ascii_uppercase )
__lowerCamelCase = translated + string.ascii_uppercase[num]
else:
__lowerCamelCase = translated + symbol
print(f'Decryption using Key #{key}: {translated}' )
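# e.g. decrypting "GUVF" prints 26 candidates; the key-13 row reads "THIS".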
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = input("""Encrypted message: """ )
__lowerCamelCase = message.upper()
decrypt(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 12 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase = {
'''facebook/nllb-large-en-ro''': 1_024,
'''facebook/nllb-200-distilled-600M''': 1_024,
}
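# The FAIRSEQ language codes below follow NLLB's "{language}_{script}" naming, e.g. "eng_Latn".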
# fmt: off
UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Tuple = ["input_ids", "attention_mask"]
__snake_case : Dict = NllbTokenizer
__snake_case : List[int] = []
__snake_case : List[int] = []
def __init__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Tuple="<s>" , UpperCAmelCase_: str="</s>" , UpperCAmelCase_: Union[str, Any]="</s>" , UpperCAmelCase_: int="<s>" , UpperCAmelCase_: Union[str, Any]="<unk>" , UpperCAmelCase_: Union[str, Any]="<pad>" , UpperCAmelCase_: str="<mask>" , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: int=None , UpperCAmelCase_: str=False , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
_SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else """eng_Latn"""
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
_SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase ( self: int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self: int , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] , UpperCAmelCase_: Optional[str] , **UpperCAmelCase_: Any ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: str = "eng_Latn" , UpperCAmelCase_: Optional[List[str]] = None , UpperCAmelCase_: str = "fra_Latn" , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = src_lang
_SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
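        # Legacy layout: no prefix, suffix = [</s>, lang_code]; the fixed layout prefixes the language code instead.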
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
_SCREAMING_SNAKE_CASE = [self.cur_lang_code]
_SCREAMING_SNAKE_CASE = [self.eos_token_id]
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
_SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
| 306 | 0 |
lowerCamelCase_ : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase_ : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase_ : List[str] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 368 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A__ ( ) -> Union[str, Any]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCamelCase_: Optional[int] = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A__ ( ) -> Union[str, Any]:
assert _test_patching.open is open
UpperCamelCase_: List[Any] = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , """open""" , lowerCamelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def A__ ( ) -> Optional[Any]:
# pandas.read_csv is not present in _test_patching
UpperCamelCase_: Optional[Any] = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching , """pandas.read_csv""" , lowerCamelCase ):
pass
def A__ ( ) -> Any:
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at some point
UpperCamelCase_: List[Any] = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , """len""" , lowerCamelCase ) is None
with patch_submodule(_test_patching , """len""" , lowerCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A__ ( ) -> Any:
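    # patch_submodule can also be driven manually via start()/stop() instead of a with-block.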
UpperCamelCase_: Dict = """__test_patch_submodule_start_and_stop_mock__"""
UpperCamelCase_: List[str] = patch_submodule(_test_patching , """open""" , lowerCamelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A__ ( ) -> List[str]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCamelCase_: Optional[Any] = """__test_patch_submodule_successive_join__"""
UpperCamelCase_: Any = """__test_patch_submodule_successive_dirname__"""
UpperCamelCase_: Dict = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.join""" , lowerCamelCase ):
with patch_submodule(_test_patching , """os.path.dirname""" , lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: Dict = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , lowerCamelCase ):
pass
with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , lowerCamelCase ):
pass
| 223 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ) -> Any:
# Load configuration defined in the metadata file
with open(UpperCAmelCase_ ) as metadata_file:
__lowerCamelCase : List[Any] = json.load(UpperCAmelCase_ )
__lowerCamelCase : Optional[Any] = LukeConfig(use_entity_aware_attention=UpperCAmelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__lowerCamelCase : Union[str, Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )
# Load the entity vocab file
__lowerCamelCase : Tuple = load_entity_vocab(UpperCAmelCase_ )
__lowerCamelCase : int = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__lowerCamelCase : List[Any] = AddedToken('<ent>' , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
__lowerCamelCase : str = AddedToken('<ent2>' , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : List[str] = LukeTokenizer.from_pretrained(UpperCAmelCase_ )
# Initialize the embeddings of the special tokens
__lowerCamelCase : Any = state_dict['embeddings.word_embeddings.weight']
__lowerCamelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
__lowerCamelCase : Optional[int] = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
__lowerCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCamelCase : Tuple = F'encoder.layer.{layer_index}.attention.self.'
__lowerCamelCase : Optional[int] = state_dict[prefix + matrix_name]
__lowerCamelCase : List[Any] = state_dict[prefix + matrix_name]
__lowerCamelCase : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCamelCase : Optional[Any] = state_dict['entity_embeddings.entity_embeddings.weight']
__lowerCamelCase : str = entity_emb[entity_vocab['[MASK]']]
__lowerCamelCase : Any = LukeModel(config=UpperCAmelCase_ ).eval()
__lowerCamelCase , __lowerCamelCase : Optional[int] = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
if not (len(UpperCAmelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCAmelCase_ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
__lowerCamelCase : Union[str, Any] = LukeTokenizer.from_pretrained(UpperCAmelCase_ , task='entity_classification' )
__lowerCamelCase : Union[str, Any] = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
__lowerCamelCase : Dict = (39, 42)
__lowerCamelCase : Any = tokenizer(UpperCAmelCase_ , entity_spans=[span] , add_prefix_space=UpperCAmelCase_ , return_tensors='pt' )
__lowerCamelCase : List[Any] = model(**UpperCAmelCase_ )
# Verify word hidden states
if model_size == "large":
__lowerCamelCase : int = torch.Size((1, 42, 10_24) )
__lowerCamelCase : Union[str, Any] = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
__lowerCamelCase : Any = torch.Size((1, 42, 7_68) )
__lowerCamelCase : Any = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowerCamelCase : Dict = torch.Size((1, 1, 10_24) )
__lowerCamelCase : Dict = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
__lowerCamelCase : Union[str, Any] = torch.Size((1, 1, 7_68) )
__lowerCamelCase : int = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(UpperCAmelCase_ ) )
model.save_pretrained(UpperCAmelCase_ )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> List[Any]:
__lowerCamelCase : Tuple = {}
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(UpperCAmelCase_ ):
__lowerCamelCase , __lowerCamelCase : Optional[int] = line.rstrip().split('\t' )
__lowerCamelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
A__ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 185 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
A__ : str = get_logger(__name__)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]:
__lowerCamelCase : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : Optional[int] = module._original_module if isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) else module
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Tuple = []
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]:
__lowerCamelCase : Optional[int] = obj
__lowerCamelCase : List[Any] = target
__lowerCamelCase : Union[str, Any] = new
__lowerCamelCase : Union[str, Any] = target.split('.' )[0]
__lowerCamelCase : Dict = {}
__lowerCamelCase : Dict = attrs or []
def __enter__( self ) -> Optional[int]:
*__lowerCamelCase , __lowerCamelCase : int = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
try:
__lowerCamelCase : Optional[int] = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__lowerCamelCase : List[Any] = getattr(self.obj , SCREAMING_SNAKE_CASE_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__lowerCamelCase : Optional[Any] = obj_attr
# patch at top level
setattr(self.obj , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(SCREAMING_SNAKE_CASE_ , attrs=self.attrs ) )
__lowerCamelCase : str = getattr(self.obj , SCREAMING_SNAKE_CASE_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , attrs=self.attrs ) )
__lowerCamelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# finally set the target attribute
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__lowerCamelCase : Union[str, Any] = getattr(import_module('.'.join(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , SCREAMING_SNAKE_CASE_ ) is attr_value:
__lowerCamelCase : Optional[int] = getattr(self.obj , SCREAMING_SNAKE_CASE_ )
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
__lowerCamelCase : List[Any] = globals()['__builtins__'][target_attr]
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new )
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self , *SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
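        # Restore every attribute that was overwritten while the patch was active.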
for attr in list(self.original ):
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.original.pop(SCREAMING_SNAKE_CASE_ ) )
def lowercase_ ( self ) -> Optional[int]:
self.__enter__()
self._active_patches.append(self )
def lowercase_ ( self ) -> str:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 185 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
A_ = BertJapaneseTokenizer
A_ = False
A_ = True
def __A ( self: Optional[Any] ) -> str:
super().setUp()
_A = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
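        # Persist the toy vocabulary so the tokenizer can be instantiated from disk.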
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self: Dict , __A: Optional[Any] ) -> Tuple:
_A = """こんにちは、世界。 \nこんばんは、世界。"""
_A = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def __A ( self: List[Any] , __A: Optional[int] ) -> Union[str, Any]:
_A = self.get_input_output_texts(_UpperCamelCase )
_A = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_A = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
return text, ids
def __A ( self: Tuple ) -> Union[str, Any]:
pass # TODO add if relevant
def __A ( self: Optional[Any] ) -> Union[str, Any]:
pass # TODO add if relevant
def __A ( self: Dict ) -> List[Any]:
pass # TODO add if relevant
def __A ( self: List[str] ) -> str:
_A = self.tokenizer_class(self.vocab_file )
_A = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __A ( self: int ) -> Dict:
_A = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_UpperCamelCase )
_A = """こんにちは、世界。\nこんばんは、世界。"""
_A = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
_A = pickle.load(_UpperCamelCase )
_A = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __A ( self: int ) -> Optional[int]:
_A = MecabTokenizer(mecab_dic='''ipadic''' )
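        # IPAdic keeps "アップルストア" as a single token; the unidic variants below split it into "アップル" / "ストア".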
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self: Optional[Any] ) -> int:
try:
_A = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self: Union[str, Any] ) -> Tuple:
try:
_A = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self: Optional[int] ) -> int:
_A = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __A ( self: List[str] ) -> List[str]:
try:
_A = MecabTokenizer(
do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
            # If the dictionary is not installed on the system, the constructor above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __A ( self: int ) -> List[Any]:
_A = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __A ( self: Union[str, Any] ) -> str:
_A = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_UpperCamelCase )
_A = """こんにちは、世界。\nこんばんは、世界。"""
_A = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
_A = pickle.load(_UpperCamelCase )
_A = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_sudachi
def __A ( self: str ) -> List[str]:
_A = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __A ( self: Dict ) -> Dict:
_A = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __A ( self: List[Any] ) -> int:
_A = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __A ( self: List[str] ) -> Union[str, Any]:
_A = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __A ( self: str ) -> Tuple:
_A = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __A ( self: Any ) -> Optional[int]:
_A = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __A ( self: List[Any] ) -> Union[str, Any]:
_A = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __A ( self: Tuple ) -> str:
_A = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_UpperCamelCase )
_A = """こんにちは、世界。\nこんばんは、世界。"""
_A = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
_A = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
_A = pickle.load(_UpperCamelCase )
_A = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_jumanpp
def __A ( self: Any ) -> Tuple:
_A = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __A ( self: Optional[Any] ) -> Union[str, Any]:
_A = JumanppTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __A ( self: List[Any] ) -> Optional[Any]:
_A = JumanppTokenizer(normalize_text=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __A ( self: Tuple ) -> Optional[int]:
_A = JumanppTokenizer(trim_whitespace=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __A ( self: Any ) -> Any:
_A = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __A ( self: Dict ) -> Optional[int]:
_A = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
_A = {}
for i, token in enumerate(_UpperCamelCase ):
_A = i
_A = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __A ( self: Any ) -> str:
_A = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
_A = tokenizer.subword_tokenizer
_A = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_UpperCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
_A = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_UpperCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __A ( self: Optional[int] ) -> Any:
_A = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
_A = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase )
_A = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase )
_A = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
_A = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
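
# ---- Added usage sketch (not part of the original test file) ----
# Shows the CharacterTokenizer behaviour exercised above, outside the unittest
# harness. The import path is the real transformers location; the tiny vocab
# is an assumption for illustration only.
from transformers.models.bert_japanese.tokenization_bert_japanese import CharacterTokenizer

if __name__ == "__main__":
    demo_vocab = {tok: i for i, tok in enumerate(["[UNK]", "こ", "ん", "に", "ち", "は"])}
    demo_tokenizer = CharacterTokenizer(vocab=demo_vocab, unk_token="[UNK]")
    print(demo_tokenizer.tokenize("こんにちは"))  # -> ['こ', 'ん', 'に', 'ち', 'は']
    print(demo_tokenizer.tokenize("こんにちほ"))  # 'ほ' is out of vocab, so the last token is '[UNK]'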
| 358 |
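# Note (added): the integer lists below appear to be expected timestep schedules
# from a diffusion-scheduler test, one list per timestep-spacing configuration.
# Each list is bound to the same placeholder name `__A` in this excerpt, so each
# assignment shadows the previous one; the original variable names are not
# recoverable from the excerpt.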
__A = [999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0]
__A = [999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0]
__A = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700,
    699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100,
    99, 88, 77, 66, 55, 44, 33, 22, 11, 0,
]
__A = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928,
    923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828,
    827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699,
    698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505,
    484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0,
]
__A = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957,
    956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910,
    907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855,
    852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795,
    791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730,
    726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655,
    650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439,
    428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175,
    132, 131, 88, 44, 0,
]
__A = [999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0]
__A = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820,
    800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
__A = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934,
    931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830,
    823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644,
    633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400,
    399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0,
]
| 75 | 0 |
"""simple docstring"""

import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
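
# ---- Added note (not part of the original test file) ----
# The expected ids in `input_bpe_tokens` appear to follow from PhobertTokenizer
# reserving <s>=0, <pad>=1, </s>=2, <unk>=3 ahead of the vocab written in setUp(),
# so "T@@" -> 4, "i" -> 5, "I" -> 6, "R@@" -> 7, "e@@" -> 9, and every subword
# outside the six-entry vocab maps to 3 (the unknown token).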
| 67 |
'''simple docstring'''

import math


def check_partition_perfect(positive_integer: int) -> bool:
    # the check passes when sqrt(4k + 1) / 2 + 1 / 2 is an exact power of two,
    # i.e. when its base-2 logarithm is a whole number
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 89 | 0 |
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 200 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
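
# ---- Added note (not part of the original file) ----
# get_test_info appears to live in transformers' utils/ directory and to inspect
# test modules in order to map model classes to their test and tester classes;
# a typical invocation would be `pytest tests/utils/test_get_test_info.py`.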
| 200 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 52 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) <= 1:
return lst
_UpperCAmelCase = 1
while i < len(_SCREAMING_SNAKE_CASE ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1]
i -= 1
if i == 0:
_UpperCAmelCase = 1
return lst
if __name__ == "__main__":
__A : Dict = input("Enter numbers separated by a comma:\n").strip()
__A : List[Any] = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
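
# ---- Added note (not part of the original file) ----
# Gnome sort steps back one position after every swap, so it performs O(n^2)
# comparisons in the worst case, like insertion sort.
#
#   >>> gnome_sort([5, 3, 1, 4])
#   [1, 3, 4, 5]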
| 260 | 0 |
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
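
# ---- Added usage sketch (not part of the original file) ----
# Round-tripping a dataset through Parquet with the reader/writer above;
# the file name is a placeholder.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "data.parquet").write()
#   ds2 = ParquetDatasetReader("data.parquet", split="train").read()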
| 87 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)

        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
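
# ---- Added usage sketch (not part of the original file) ----
# A plausible invocation with placeholder paths; the exact flag set comes from
# add_generic_args and BaseTransformer, which are not shown in this excerpt.
#
#   python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out \
#       --max_seq_length 128 --do_train --do_predict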
| 87 | 1 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # (the pairing of point sets below is an assumption; this excerpt collapsed
    # the four distinct array names into one placeholder)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
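
# ---- Added note (not part of the original file) ----
# cv2.getAffineTransform solves for the 2x3 matrix M that maps three source
# points onto three destination points, i.e. dst = M @ [x, y, 1]^T, and
# cv2.warpAffine then applies M to every pixel of the image.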
| 227 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
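
# ---- Added usage sketch (not part of the original file) ----
# The pipeline factory is the usual entry point; the checkpoint below is one
# commonly used vision-to-text model, but any vision2seq checkpoint works.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("parrots.png"))  # [{'generated_text': '...'}]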
| 227 | 1 |
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 368 |
'''simple docstring'''

import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
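
# ---- Added note (not part of the original file) ----
# In this distributed RAG setup only rank 0 hosts the retrieval index: workers
# gather their query vectors to rank 0 over the side "gloo" group, rank 0 runs
# the search once for everyone, and the ids/embeddings are scattered back.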
| 13 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowercase__ = pytest.mark.integration
@require_faiss
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : int = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self ):
import faiss
_lowerCamelCase : Dataset = self._create_dummy_dataset()
_lowerCamelCase : str = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
_lowerCamelCase : Optional[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowerCamelCase, _lowerCamelCase : int = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self ):
import faiss
_lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowerCamelCase, _lowerCamelCase : Optional[int] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self ):
import faiss
_lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
_lowerCamelCase, _lowerCamelCase : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self ):
_lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self ):
from elasticsearch import Elasticsearch
_lowerCamelCase : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
_lowerCamelCase : Tuple = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
_lowerCamelCase : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
_lowerCamelCase : Optional[int] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import faiss
_lowerCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_lowerCamelCase : Dict = np.zeros(5 , dtype=np.floataa )
_lowerCamelCase : Dict = 1
_lowerCamelCase, _lowerCamelCase : int = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_lowerCamelCase : Union[str, Any] = np.eye(5 , dtype=np.floataa )[::-1]
_lowerCamelCase, _lowerCamelCase : str = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
_lowerCamelCase : List[str] = [scores[0] for scores in total_scores]
_lowerCamelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def A_ ( self ):
import faiss
_lowerCamelCase : Tuple = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_lowerCamelCase : int = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
_lowerCamelCase : List[Any] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self ):
import faiss
_lowerCamelCase : Dict = faiss.IndexFlat(5 )
_lowerCamelCase : Union[str, Any] = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self ):
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def _snake_case ( mockfs ):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f'''mock://{index_name}'''
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
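# Illustrative sketch (added; not part of the original test suite): a minimal
# FaissIndex round trip through save/load, using the same `datasets.search.FaissIndex`
# API the tests above exercise. The helper name and the temp path are assumptions.
def _faiss_round_trip_sketch(path='index_sketch.faiss'):
    import faiss  # requires faiss-cpu or faiss-gpu

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))  # five one-hot vectors
    index.save(path)
    restored = FaissIndex.load(path)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = restored.search(query)  # vector 1 should rank first
    return scores, indices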
@require_elasticsearch
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices ) | 96 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
    results = {}
    path = os.path.join(lowercase__ , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
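# Hedged example (added): a minimal `all_results.json` that `get_results`
# above would parse. The exact keys are an assumption based on the
# `eval_accuracy` lookup in the test below.
def _write_dummy_results(output_dir):
    with open(os.path.join(output_dir, 'all_results.json'), 'w') as f:
        json.dump({'eval_accuracy': 0.75, 'eval_loss': 0.5}, f)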
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def A_ ( self ):
import xla_spawn
        testargs = '\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    '.split()
        with patch.object(sys , 'argv' , testargs ):
            xla_spawn.main() | 96 | 1 |
'''simple docstring'''
from math import pi
def arc_length ( angle : int , radius : int ):
return 2 * pi * radius * (angle / 3_60)
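# Added sanity checks: 360 degrees must give the full circumference 2*pi*r,
# and 90 degrees a quarter of it.
assert abs(arc_length(3_60, 10) - 2 * pi * 10) < 1e-9
assert abs(arc_length(90, 10) - 2 * pi * 10 / 4) < 1e-9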
if __name__ == "__main__":
print(arc_length(90, 10))
| 123 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__lowerCAmelCase : Tuple =trt.Logger(trt.Logger.WARNING)
__lowerCAmelCase : Optional[Any] =absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__lowerCAmelCase : List[Any] =logging.getLogger(__name__)
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
__lowerCAmelCase : Tuple =parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer ( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["input_ids"] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["attention_mask"] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["token_type_ids"] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
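# Hedged aside (illustration only): the host<->device traffic in `model_infer`
# follows the standard pycuda async pattern -- pinned host buffers, a device
# allocation per tensor, and a stream ordering copy-in / execute / copy-out.
# A self-contained miniature of that pattern, with no TensorRT involved:
def _pycuda_roundtrip_sketch():
    h_in = cuda.pagelocked_empty(8, dtype=np.float32)  # pinned host memory
    h_in[:] = np.arange(8, dtype=np.float32)
    h_out = cuda.pagelocked_empty(8, dtype=np.float32)
    d_buf = cuda.mem_alloc(h_in.nbytes)  # device buffer
    stream = cuda.Stream()
    cuda.memcpy_htod_async(d_buf, h_in, stream)  # host -> device
    cuda.memcpy_dtoh_async(h_out, d_buf, stream)  # device -> host
    stream.synchronize()  # wait for both copies to finish
    return h_out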
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__lowerCAmelCase : str =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features ( examples ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="max_length" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
        ]
    return tokenized_examples
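# Hedged illustration (not executed by this script): with a stride and
# `return_overflowing_tokens=True`, one long example fans out into several
# features, and `overflow_to_sample_mapping` points each feature back to the
# example it came from, e.g.:
#
#     enc = tokenizer(["short question"], ["a very long context ..."],
#                     truncation="only_second", max_length=max_seq_length,
#                     stride=args.doc_stride, return_overflowing_tokens=True)
#     enc["overflow_to_sample_mapping"]  # e.g. [0, 0, 0]: 3 features, 1 example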
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function ( examples , features , predictions , stage="eval" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
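# Hedged illustration of the payload shapes the SQuAD metric expects from the
# function above (v1 shown; v2 additionally carries "no_answer_probability"):
#
#     predictions = [{"id": "abc123", "prediction_text": "Denver Broncos"}]
#     references = [{"id": "abc123", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}]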
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes ( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 123 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCAmelCase__ ( PretrainedConfig ):
    '''simple docstring'''

    model_type = 'swinv2'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.0_2 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
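# Hedged usage sketch (illustration only, using the config class defined
# above): with the defaults, the derived channel dimension is
# 96 * 2 ** (4 - 1) = 768, and `num_attention_heads` resolves to `num_heads`
# through attribute_map.
#
#     cfg = UpperCAmelCase__()
#     cfg.hidden_size           # 768
#     cfg.num_attention_heads   # [3, 6, 12, 24]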
| 226 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( BaseOutput ):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any] )-> int:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ), torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ), gelu_new(x ) ) )
def snake_case ( self : str )-> Optional[Any]:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        gelu_10 = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu_10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask ) )
def snake_case ( self : Any )-> int:
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )
def snake_case ( self : List[Any] )-> List[Any]:
        act1 = get_activation('''gelu''' )
        act1.a = 1
        act2 = get_activation('''gelu''' )
        self.assertEqual(act1.a, 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
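# Added illustration: the relationship the tests above rely on, sketched with
# plain PyTorch. The exact and tanh-approximated GELU agree closely but are
# not identical, which is why gelu_python vs. gelu_new is asserted as unequal.
def _gelu_variants_sketch():
    x = torch.linspace(-3, 3, steps=7)
    exact = torch.nn.functional.gelu(x)
    approx = torch.nn.functional.gelu(x, approximate='tanh')
    return torch.max(torch.abs(exact - approx))  # small, but non-zero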
| 351 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : int = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self, vocab_size=5_0265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
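# Hedged usage sketch (illustration only, not part of the original module):
# attribute_map lets generic code read `hidden_size` / `num_attention_heads`
# even though the stored fields are `d_model` / `encoder_attention_heads`.
#
#     cfg = BlenderbotSmallConfig()   # hypothetical name for the class above
#     cfg.hidden_size                 # 512 (aliased to d_model)
#     cfg.num_attention_heads         # 16 (aliased to encoder_attention_heads)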
class __SCREAMING_SNAKE_CASE ( OnnxSeq2SeqConfigWithPast ):
'''simple docstring'''
@property
def snake_case ( self : Optional[int] )-> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : int =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase__ : List[str] ={0: '''batch'''}
lowerCamelCase__ : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCamelCase__ : Union[str, Any] ={0: '''batch''', 1: '''decoder_sequence'''}
lowerCamelCase__ : Tuple ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase, direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Optional[Any] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.num_layers
for i in range(lowerCamelCase ):
lowerCamelCase__ : Optional[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCamelCase__ : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCamelCase__ : str =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def snake_case ( self : Dict )-> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict =super().outputs
else:
lowerCamelCase__ : Optional[Any] =super(lowerCamelCase, self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : str =self.num_layers
for i in range(lowerCamelCase ):
lowerCamelCase__ : Tuple ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCamelCase__ : Dict ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def snake_case ( self : List[str], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
lowerCamelCase__ : Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Generate decoder inputs
lowerCamelCase__ : str =seq_length if not self.use_past else 1
lowerCamelCase__ : Tuple =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : str ={F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Tuple =dict(**lowerCamelCase, **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : List[Any] =common_inputs['''input_ids'''].shape
lowerCamelCase__ : Optional[Any] =common_inputs['''decoder_input_ids'''].shape[1]
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.num_attention_heads
lowerCamelCase__ : str =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : List[Any] =decoder_seq_length + 3
lowerCamelCase__ : Optional[int] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Optional[Any] =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase, lowerCamelCase )], dim=1 )
lowerCamelCase__ : Tuple =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : int =self.num_layers
lowerCamelCase__ : Any =min(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any =max(lowerCamelCase, lowerCamelCase ) - min_num_layers
lowerCamelCase__ : Dict ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
lowerCamelCase__ : Union[str, Any] =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase, lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def snake_case ( self : List[str], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
lowerCamelCase__ : int =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : List[str] =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Union[str, Any] =seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : int =self.num_layers
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.num_attention_heads
lowerCamelCase__ : int =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : str =common_inputs['''attention_mask'''].dtype
lowerCamelCase__ : int =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase, lowerCamelCase, dtype=lowerCamelCase )], dim=1 )
lowerCamelCase__ : str =[
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def snake_case ( self : Optional[int], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ : int =compute_effective_axis_dimension(
lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : Optional[int] =tokenizer.num_special_tokens_to_add(lowerCamelCase )
lowerCamelCase__ : Optional[int] =compute_effective_axis_dimension(
lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Optional[int] =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] =dict(tokenizer(lowerCamelCase, return_tensors=lowerCamelCase ) )
return common_inputs
def snake_case ( self : List[Any], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Union[str, Any] =self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
else:
lowerCamelCase__ : List[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
return common_inputs
def snake_case ( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Any )-> str:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Any =super()._flatten_past_key_values_(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase__ : List[str] =super(lowerCamelCase, self )._flatten_past_key_values_(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
| 272 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__ (PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
return 8
@property
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
@property
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(config )
@property
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 1_6,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 3_2,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
@property
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 9_3),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 1_2,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
    def SCREAMING_SNAKE_CASE__( self , device , seed=0 ) -> Optional[Any]:
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 3_2,
            "output_type": "np",
        }
return inputs
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (2_0, 3_2, 3_2, 3)
        expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_np_out.npy""" )
        pipe = ShapEPipeline.from_pretrained("""openai/shap-e""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            """a shark""" , generator=generator , guidance_scale=1_5.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0]
        assert images.shape == (2_0, 6_4, 6_4, 3)
        assert_mean_pixel_difference(images , expected_image )
| 170 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs ( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common ( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
    def check_use_cache_forward ( self , model_class_name , config , input_ids , attention_mask ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask ( self , model_class_name , config , input_ids , attention_mask ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
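# Hedged sketch of the incremental-decoding pattern the two checks above
# exercise: prime the cache on the prefix, then feed one token at a time.
# `model` stands for any Flax causal LM that supports init_cache.
#
#     past = model.init_cache(batch_size, max_length)
#     out = model(prefix_ids, attention_mask=mask, past_key_values=past,
#                 position_ids=prefix_positions)
#     out = model(next_token, attention_mask=mask,
#                 past_key_values=out.past_key_values, position_ids=next_pos)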
@require_flax
class _lowercase ( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
        self.model_tester = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
def lowerCAmelCase ( self : str ):
'''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : int = getattr(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = pt_inputs["input_ids"].shape
lowerCamelCase__ : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = 1
lowerCamelCase__ : List[Any] = pt_model_class(__lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = model_class(__lowerCamelCase , dtype=jnp.floataa )
lowerCamelCase__ : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __lowerCamelCase )
lowerCamelCase__ : Optional[int] = fx_state
with torch.no_grad():
lowerCamelCase__ : List[Any] = pt_model(**__lowerCamelCase ).to_tuple()
lowerCamelCase__ : Union[str, Any] = fx_model(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__lowerCamelCase )
lowerCamelCase__ : List[Any] = model_class.from_pretrained(__lowerCamelCase , from_pt=__lowerCamelCase )
lowerCamelCase__ : int = fx_model_loaded(**__lowerCamelCase ).to_tuple()
self.assertEqual(
len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : List[Any] = getattr(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pt_model_class(__lowerCamelCase ).eval()
lowerCamelCase__ : Tuple = model_class(__lowerCamelCase , dtype=jnp.floataa )
lowerCamelCase__ : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCamelCase , fx_model.params )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pt_inputs["input_ids"].shape
lowerCamelCase__ : Optional[int] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCamelCase__ : Dict = pt_model(**__lowerCamelCase ).to_tuple()
lowerCamelCase__ : List[str] = fx_model(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__lowerCamelCase )
lowerCamelCase__ : Any = pt_model_class.from_pretrained(__lowerCamelCase , from_flax=__lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = pt_model_loaded(**__lowerCamelCase ).to_tuple()
self.assertEqual(
len(__lowerCamelCase ) , len(__lowerCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
lowerCamelCase__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
| 184 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str] = ["""image_processor""", """tokenizer"""]
a__ : Any = """BlipImageProcessor"""
a__ : int = """AutoTokenizer"""
def __init__( self , __lowercase , __lowercase , __lowercase) -> Any:
super().__init__(__lowercase , __lowercase)
# add QFormer tokenizer
__UpperCamelCase :Optional[Any] = qformer_tokenizer
def __call__( self , __lowercase = None , __lowercase = None , __lowercase = True , __lowercase = False , __lowercase = None , __lowercase = None , __lowercase = 0 , __lowercase = None , __lowercase = None , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = True , __lowercase = None , **__lowercase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''')
__UpperCamelCase :Tuple = BatchFeature()
if text is not None:
__UpperCamelCase :Tuple = self.tokenizer(
text=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_token_type_ids=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
encoding.update(__lowercase)
__UpperCamelCase :Dict = self.qformer_tokenizer(
text=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_token_type_ids=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
__UpperCamelCase :List[str] = qformer_text_encoding.pop('''input_ids''')
__UpperCamelCase :List[Any] = qformer_text_encoding.pop('''attention_mask''')
if images is not None:
__UpperCamelCase :List[Any] = self.image_processor(__lowercase , return_tensors=__lowercase)
encoding.update(__lowercase)
return encoding
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> Optional[int]:
return self.tokenizer.decode(*__lowercase , **__lowercase)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[Any] = self.tokenizer.model_input_names
__UpperCamelCase :Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def UpperCamelCase__ ( self , __lowercase , **__lowercase) -> Optional[Any]:
if os.path.isfile(__lowercase):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(__lowercase , exist_ok=__lowercase)
__UpperCamelCase :int = os.path.join(__lowercase , '''qformer_tokenizer''')
self.qformer_tokenizer.save_pretrained(__lowercase)
return super().save_pretrained(__lowercase , **__lowercase)
@classmethod
def UpperCamelCase__ ( cls , __lowercase , **__lowercase) -> Optional[int]:
__UpperCamelCase :int = AutoTokenizer.from_pretrained(__lowercase , subfolder='''qformer_tokenizer''')
__UpperCamelCase :List[Any] = cls._get_arguments_from_pretrained(__lowercase , **__lowercase)
args.append(__lowercase)
return cls(*__lowercase)
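# Hedged usage sketch (added; `lowerCamelCase_` is this snippet's obfuscated
# class name and the constructor argument names are assumptions): the
# processor bundles a BlipImageProcessor, a main tokenizer and a Q-Former
# tokenizer; __call__ returns the main text features, the popped qformer_*
# entries, and pixel_values when images are passed.
# processor = lowerCamelCase_(image_processor, tokenizer, qformer_tokenizer)
# batch = processor(images=image, text="a photo of a cat", return_tensors="pt")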
| 105 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[str] = torch.nn.Linear(2 , 4 )
__UpperCamelCase :Any = torch.optim.AdamW(model.parameters() , lr=1.0 )
__UpperCamelCase :List[Any] = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__UpperCamelCase :List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__UpperCamelCase :Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE )
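# Added note (hedged): the second helper above collapses a Linear layer to a
# single scalar -- sum(|W|) + sum(|b|) -- so the tests below can cheaply tell
# whether save_state/load_state restored the parameters; the third helper
# overwrites the model with a freshly initialized state_dict, so signatures
# are expected to diverge until load_state runs.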
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@require_cuda
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Dict = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowercase):
__UpperCamelCase :Any = Accelerator(cpu=__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase :List[Any] = GradientState()
assert state.num_steps == 1
__UpperCamelCase :Any = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCamelCase :int = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Tuple = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = create_components()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def UpperCamelCase__ ( self) -> Union[str, Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowercase , **__lowercase):
pass
with patch('''torch.cuda.set_device''' , __lowercase), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64'''):
__UpperCamelCase :Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device) , '''cuda:64''')
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :Tuple = get_signature(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# make sure loaded weights match
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :Any = get_signature(__lowercase)
# saving hook
def save_config(__lowercase , __lowercase , __lowercase):
__UpperCamelCase :Union[str, Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(__lowercase , '''data.json''') , '''w''') as f:
json.dump(__lowercase , __lowercase)
# loading hook
def load_config(__lowercase , __lowercase):
with open(os.path.join(__lowercase , '''data.json''') , '''r''') as f:
__UpperCamelCase :Dict = json.load(__lowercase)
__UpperCamelCase :Dict = config['''class_name''']
__UpperCamelCase :Union[str, Any] = accelerator.register_save_state_pre_hook(__lowercase)
__UpperCamelCase :Any = accelerator.register_load_state_pre_hook(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match with hooks
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# random class name to verify correct one is loaded
__UpperCamelCase :int = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match with hooks removed
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# random class name to verify correct one is loaded
__UpperCamelCase :Dict = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Optional[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = create_components()
__UpperCamelCase :Optional[Any] = None
# This should work
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertTrue(dummy_obj is None)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = create_components()
__UpperCamelCase :Dict = [1, 2, 3]
# This should work
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def UpperCamelCase__ ( self) -> int:
from transformers import AutoModelForCausalLM
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__lowercase , device_map={'''''': 0} , )
__UpperCamelCase :Optional[Any] = Accelerator()
# This should work
__UpperCamelCase :int = accelerator.prepare(__lowercase)
@slow
@require_bnb
def UpperCamelCase__ ( self) -> List[str]:
from transformers import AutoModelForCausalLM
__UpperCamelCase :str = Accelerator()
with init_empty_weights():
__UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__UpperCamelCase :List[str] = infer_auto_device_map(__lowercase)
__UpperCamelCase :str = '''cpu'''
__UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=__lowercase , load_in_abit=__lowercase , llm_inta_enable_fpaa_cpu_offload=__lowercase)
# This should not work and should raise a ValueError
with self.assertRaises(__lowercase):
__UpperCamelCase :Union[str, Any] = accelerator.prepare(__lowercase)
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self) -> Union[str, Any]:
from transformers import AutoModelForCausalLM
__UpperCamelCase :int = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCamelCase :Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__UpperCamelCase :int = infer_auto_device_map(__lowercase)
__UpperCamelCase :List[Any] = 1
__UpperCamelCase :int = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__lowercase , device_map=__lowercase , )
__UpperCamelCase :Dict = Accelerator()
# This should not work and should raise a ValueError
with self.assertRaises(__lowercase):
__UpperCamelCase :Any = accelerator.prepare(__lowercase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self) -> Dict:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__UpperCamelCase :List[str] = infer_auto_device_map(__lowercase)
__UpperCamelCase :Optional[int] = 1
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__lowercase , device_map=__lowercase , )
__UpperCamelCase :int = Accelerator()
# This should work
__UpperCamelCase :int = accelerator.prepare(__lowercase)
@require_cuda
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Tuple = torch.nn.Linear(10 , 10)
__UpperCamelCase :Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01)
__UpperCamelCase :Any = Accelerator(cpu=__lowercase)
__UpperCamelCase :Tuple = accelerator.prepare(__lowercase)
| 105 | 1 |
def _lowerCAmelCase (_lowerCAmelCase):
UpperCamelCase_ = 0
while len(_lowerCAmelCase) > 1:
UpperCamelCase_ = 0
# Consider two files with minimum cost to be merged
for _ in range(2):
UpperCamelCase_ = files.index(min(_lowerCAmelCase))
temp += files[min_index]
files.pop(_lowerCAmelCase)
files.append(_lowerCAmelCase)
optimal_merge_cost += temp
return optimal_merge_cost
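# Hedged usage sketch (added; `_lowerCAmelCase` is the obfuscated name of the
# greedy merge-cost routine above): merging [2, 3, 4] costs 2 + 3 = 5 for the
# first merge and 5 + 4 = 9 for the second, so
# _lowerCAmelCase([2, 3, 4]) == 14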
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
UpperCAmelCase : int ="""Hello, World!"""
UpperCAmelCase : int ="""en_XX"""
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = Path("data_bin")
UpperCamelCase_ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase).parent) , checkpoint_file=Path(_lowerCAmelCase).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(_lowerCAmelCase) , bpe="sentencepiece" , sentencepiece_model=str(Path(_lowerCAmelCase).parent / "sentencepiece.bpe.model") , src_dict=str(data_dir / "dict.txt") , )
xmod.eval() # disable dropout
print(_lowerCAmelCase)
UpperCamelCase_ = xmod.model.encoder.sentence_encoder
UpperCamelCase_ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCamelCase_ = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , _lowerCAmelCase)
UpperCamelCase_ = XmodForSequenceClassification(_lowerCAmelCase) if classification_head else XmodForMaskedLM(_lowerCAmelCase)
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase_ = xmod_sent_encoder.embed_tokens.weight
UpperCamelCase_ = xmod_sent_encoder.embed_positions.weight
UpperCamelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
UpperCamelCase_ = xmod_sent_encoder.layernorm_embedding.weight
UpperCamelCase_ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
UpperCamelCase_ = model.roberta.encoder.layer[i]
UpperCamelCase_ = xmod_sent_encoder.layers[i]
# self attention
UpperCamelCase_ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError("Dimensions of self-attention weights do not match.")
UpperCamelCase_ = xmod_layer.self_attn.q_proj.weight
UpperCamelCase_ = xmod_layer.self_attn.q_proj.bias
UpperCamelCase_ = xmod_layer.self_attn.k_proj.weight
UpperCamelCase_ = xmod_layer.self_attn.k_proj.bias
UpperCamelCase_ = xmod_layer.self_attn.v_proj.weight
UpperCamelCase_ = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase_ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match.")
UpperCamelCase_ = xmod_layer.self_attn.out_proj.weight
UpperCamelCase_ = xmod_layer.self_attn.out_proj.bias
UpperCamelCase_ = xmod_layer.self_attn_layer_norm.weight
UpperCamelCase_ = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCamelCase_ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match.")
UpperCamelCase_ = xmod_layer.fca.weight
UpperCamelCase_ = xmod_layer.fca.bias
# output
UpperCamelCase_ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match.")
UpperCamelCase_ = xmod_layer.fca.weight
UpperCamelCase_ = xmod_layer.fca.bias
UpperCamelCase_ = xmod_layer.final_layer_norm.weight
UpperCamelCase_ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCamelCase_ = xmod_layer.adapter_layer_norm.weight
UpperCamelCase_ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError("Lists of language adapters do not match.")
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCamelCase_ = bert_output.adapter_modules[lang_code]
UpperCamelCase_ = xmod_layer.adapter_modules[lang_code]
UpperCamelCase_ = from_adapter.fca.weight
UpperCamelCase_ = from_adapter.fca.bias
UpperCamelCase_ = from_adapter.fca.weight
UpperCamelCase_ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCamelCase_ = xmod_sent_encoder.layer_norm.weight
UpperCamelCase_ = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCamelCase_ = xmod.model.classification_heads["mnli"].dense.weight
UpperCamelCase_ = xmod.model.classification_heads["mnli"].dense.bias
UpperCamelCase_ = xmod.model.classification_heads["mnli"].out_proj.weight
UpperCamelCase_ = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCamelCase_ = xmod.model.encoder.lm_head.dense.weight
UpperCamelCase_ = xmod.model.encoder.lm_head.dense.bias
UpperCamelCase_ = xmod.model.encoder.lm_head.layer_norm.weight
UpperCamelCase_ = xmod.model.encoder.lm_head.layer_norm.bias
UpperCamelCase_ = xmod.model.encoder.lm_head.weight
UpperCamelCase_ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCamelCase_ = xmod.encode(_lowerCAmelCase).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase)
UpperCamelCase_ = model(_lowerCAmelCase)[0]
if classification_head:
UpperCamelCase_ = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCAmelCase))
else:
UpperCamelCase_ = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
UpperCamelCase_ = torch.max(torch.abs(our_output - their_output)).item()
print(f"""max_absolute_diff = {max_absolute_diff}""") # ~ 1e-7
UpperCamelCase_ = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3)
print("Do both models output the same tensors?" , "🔥" if success else "💩")
if not success:
raise Exception("Something went wRoNg")
Path(_lowerCAmelCase).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase)
print(f"""Saving model to {pytorch_dump_folder_path}""")
model.save_pretrained(_lowerCAmelCase)
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
UpperCAmelCase : Tuple =parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 128 | 1 |
'''simple docstring'''
def lowerCamelCase__ ( __lowerCamelCase : str ):
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
_UpperCAmelCase : List[str] =sorted(string.lower() )
return len(__lowerCamelCase ) == len(set(__lowerCamelCase ) )
if __name__ == "__main__":
lowercase =input('Enter a string ').strip()
lowercase =is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
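# Hedged examples (added; `lowerCamelCase__` is the obfuscated name of the
# isogram check above):
# lowerCamelCase__("Uncopyrightable")  # True  -- all 15 letters are distinct
# lowerCamelCase__("allowed")          # False -- "l" repeats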
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="levit"
def __init__( self , snake_case=2_2_4 , snake_case=3 , snake_case=3 , snake_case=2 , snake_case=1 , snake_case=1_6 , snake_case=[1_2_8, 2_5_6, 3_8_4] , snake_case=[4, 8, 1_2] , snake_case=[4, 4, 4] , snake_case=[1_6, 1_6, 1_6] , snake_case=0 , snake_case=[2, 2, 2] , snake_case=[2, 2, 2] , snake_case=0.02 , **snake_case , ) -> Any:
'''simple docstring'''
super().__init__(**snake_case)
_UpperCAmelCase : List[str] =image_size
_UpperCAmelCase : str =num_channels
_UpperCAmelCase : int =kernel_size
_UpperCAmelCase : Any =stride
_UpperCAmelCase : Tuple =padding
_UpperCAmelCase : Optional[int] =hidden_sizes
_UpperCAmelCase : List[str] =num_attention_heads
_UpperCAmelCase : List[Any] =depths
_UpperCAmelCase : Any =key_dim
_UpperCAmelCase : List[Any] =drop_path_rate
_UpperCAmelCase : int =patch_size
_UpperCAmelCase : Tuple =attention_ratio
_UpperCAmelCase : Any =mlp_ratio
_UpperCAmelCase : Optional[Any] =initializer_range
_UpperCAmelCase : str =[
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =version.parse("1.11" )
@property
def lowerCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCAmelCase ( self) -> float:
'''simple docstring'''
return 1E-4
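# Hedged note (added): the first `__magic_name__` class mirrors LevitConfig --
# its defaults reproduce the levit-128S geometry above (hidden_sizes
# [128, 256, 384], depths [4, 4, 4]) -- while the second reuses the name for
# the ONNX export config, exposing the dummy pixel_values axes and the 1e-4
# validation atol returned by the two properties above.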
| 242 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=50 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> Union[str, Any]:
__lowerCamelCase : Any = parent
__lowerCamelCase : Tuple = batch_size
__lowerCamelCase : List[Any] = seq_length
__lowerCamelCase : Optional[int] = is_training
__lowerCamelCase : Union[str, Any] = use_input_mask
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Dict = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : Dict = max_position_embeddings
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : int = use_labels
__lowerCamelCase : List[str] = scope
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : Tuple = None
if self.use_input_mask:
__lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self ) -> List[str]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__a , initializer_range=self.initializer_range , )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) -> Dict:
__lowerCamelCase : Optional[int] = BertGenerationEncoder(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : Any = model(__a , attention_mask=__a )
__lowerCamelCase : Any = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) -> str:
__lowerCamelCase : Any = True
__lowerCamelCase : Optional[int] = BertGenerationEncoder(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : Any = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
__lowerCamelCase : str = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
__lowerCamelCase : Any = True
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : str = BertGenerationDecoder(config=__a ).to(__a ).eval()
# first forward pass
__lowerCamelCase : Optional[Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
__lowerCamelCase : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__lowerCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__lowerCamelCase : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase : Union[str, Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )['hidden_states'][0]
__lowerCamelCase : Union[str, Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0]
# select random slice
__lowerCamelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
__lowerCamelCase : Dict = BertGenerationDecoder(__a )
model.to(__a )
model.eval()
__lowerCamelCase : str = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self ) -> Dict:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
__lowerCamelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (_lowercase , _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : int = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowerCamelCase : Tuple = (BertGenerationDecoder,) if is_torch_available() else ()
lowerCamelCase : List[Any] = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[Any] = BertGenerationEncoderTester(self )
__lowerCamelCase : str = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowercase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : Any = 'bert'
self.model_tester.create_and_check_model(__a , __a , __a , __a )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )
def lowercase_ ( self ) -> Union[str, Any]:
# This regression test was failing with PyTorch < 1.3
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCamelCase : Any = None
self.model_tester.create_and_check_model_as_decoder(
__a , __a , __a , __a , __a , __a , )
def lowercase_ ( self ) -> str:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__a )
@slow
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(__a )
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> int:
__lowerCamelCase : Union[str, Any] = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
__lowerCamelCase : Optional[int] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(__a )[0]
__lowerCamelCase : List[str] = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , __a )
__lowerCamelCase : str = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : List[str] = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
__lowerCamelCase : Union[str, Any] = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(__a )[0]
__lowerCamelCase : Any = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , __a )
__lowerCamelCase : Dict = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 185 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if name is None:
UpperCamelCase = None
else:
UpperCamelCase = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
UpperCamelCase = fmt.format(_SCREAMING_SNAKE_CASE )
# Print and recurse (if needed).
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if msg is not None:
print(_SCREAMING_SNAKE_CASE )
for k in val.keys():
recursive_print(_SCREAMING_SNAKE_CASE , val[k] , spaces + 2 )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
print(_SCREAMING_SNAKE_CASE , ":" , val.size() )
else:
print(_SCREAMING_SNAKE_CASE , ":" , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
UpperCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
UpperCamelCase = param.view(*_SCREAMING_SNAKE_CASE )
UpperCamelCase = param.transpose(0 , 2 )
UpperCamelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
UpperCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
UpperCamelCase = param.view(*_SCREAMING_SNAKE_CASE )
UpperCamelCase = param.transpose(0 , 1 ).contiguous()
UpperCamelCase = param.view(*_SCREAMING_SNAKE_CASE )
return param
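# Hedged toy illustration (added; `a__` at this point is the obfuscated name
# of the re-ordering helper above, and the shapes are assumptions): with 3
# splits (Q/K/V), 2 heads and a per-head size of 4, a fused [24, 8] parameter
# is viewed as [2, 3, 4, 8] for checkpoint version >= 2.0, the head and split
# axes are swapped, and it is flattened back to [24, 8] so the Q/K/V rows
# become contiguous:
# _toy = torch.arange(24 * 8, dtype=torch.float32).reshape(24, 8)
# assert a__(_toy, 2.0, 3, 2, 4).shape == _toy.shape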
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = {}
# old versions did not store training args
UpperCamelCase = input_state_dict.get("args" , _SCREAMING_SNAKE_CASE )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
UpperCamelCase = ds_args.padded_vocab_size
UpperCamelCase = ds_args.max_position_embeddings
UpperCamelCase = ds_args.hidden_size
UpperCamelCase = ds_args.num_layers
UpperCamelCase = ds_args.num_attention_heads
UpperCamelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
UpperCamelCase = config.n_head
# The hidden_size per head.
UpperCamelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
UpperCamelCase = input_state_dict["checkpoint_version"]
else:
UpperCamelCase = 0.0
# The model.
UpperCamelCase = input_state_dict["model"]
# The language model.
UpperCamelCase = model["language_model"]
# The embeddings.
UpperCamelCase = lm["embedding"]
# The word embeddings.
UpperCamelCase = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
UpperCamelCase = word_embeddings[: config.vocab_size, :]
UpperCamelCase = word_embeddings
# The position embeddings.
UpperCamelCase = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
UpperCamelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
UpperCamelCase = pos_embeddings
# The transformer.
UpperCamelCase = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
UpperCamelCase = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
UpperCamelCase = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
UpperCamelCase = layer_re.match(_SCREAMING_SNAKE_CASE )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
UpperCamelCase = int(m.group(1 ) )
# The name of the operation.
UpperCamelCase = m.group(2 )
# Is it a weight or a bias?
UpperCamelCase = m.group(3 )
# The name of the layer.
UpperCamelCase = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
UpperCamelCase = "ln_1" if op_name.startswith("input" ) else "ln_2"
UpperCamelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
UpperCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
UpperCamelCase = torch.tensor(-1e4 , dtype=torch.floataa )
UpperCamelCase = masked_bias
UpperCamelCase = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
UpperCamelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
UpperCamelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
UpperCamelCase = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Store. No change of shape.
UpperCamelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
UpperCamelCase = megatron_to_transformers[op_name]
UpperCamelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
UpperCamelCase = megatron_to_transformers[op_name]
UpperCamelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
UpperCamelCase = transformer["final_layernorm.weight"]
UpperCamelCase = transformer["final_layernorm.bias"]
# For the LM head, transformers ties the output matrix to the word embeddings.
UpperCamelCase = word_embeddings
# It should be done!
return output_state_dict
def a__ ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=_SCREAMING_SNAKE_CASE , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=_SCREAMING_SNAKE_CASE , help="An optional config json file describing the pre-trained model." , )
UpperCamelCase = parser.parse_args()
# Extract the basename.
UpperCamelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is optional; keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
else:
UpperCamelCase = torch.load(args.path_to_checkpoint , map_location="cpu" )
UpperCamelCase = input_state_dict.get("args" , _SCREAMING_SNAKE_CASE )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
UpperCamelCase = "gelu_fast"
elif ds_args.openai_gelu:
UpperCamelCase = "gelu_new"
else:
UpperCamelCase = "gelu"
else:
# in the very early days this used to be "gelu_new"
UpperCamelCase = "gelu_new"
# Spell out all parameters in case the defaults change.
UpperCamelCase = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_SCREAMING_SNAKE_CASE , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_SCREAMING_SNAKE_CASE , summary_activation=_SCREAMING_SNAKE_CASE , summary_proj_to_labels=_SCREAMING_SNAKE_CASE , summary_first_dropout=0.1 , scale_attn_weights=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
UpperCamelCase = GPTaConfig.from_json_file(args.config_file )
UpperCamelCase = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
UpperCamelCase = convert_megatron_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
UpperCamelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
UpperCamelCase = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
UpperCamelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
UpperCamelCase = "gpt2"
UpperCamelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = type(_SCREAMING_SNAKE_CASE ).__name__
UpperCamelCase = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
# Store the state_dict to file.
UpperCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , "pytorch_model.bin" )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 153 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_UpperCAmelCase = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def UpperCamelCase ( __lowercase : Any ,__lowercase : str ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
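# Added note: the helper passes when `source` is within 1% of `target`, e.g.
# (100.5, 100) gives 0.005 < 0.01 and passes, while (101, 100) gives exactly
# 0.01 and fails the strict comparison.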
@pytest.mark.integration
def UpperCamelCase ( __lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = _TestCommandArgs(dataset=__lowercase ,all_configs=__lowercase ,save_infos=__lowercase )
A_ : List[Any] = TestCommand(*__lowercase )
test_command.run()
A_ : Any = os.path.join(__lowercase ,'README.md' )
assert os.path.exists(__lowercase )
A_ : Tuple = DatasetInfosDict.from_directory(__lowercase )
A_ : Any = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) ,splits=[
{
'name': 'train',
'num_bytes': 2_35_15_63,
'num_examples': 1_00_00,
},
{
'name': 'validation',
'num_bytes': 23_84_18,
'num_examples': 10_00,
},
] ,download_size=3_94_06_80 ,dataset_size=2_58_99_81 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
A_ , A_ : Union[str, Any] = getattr(dataset_infos['default'] ,__lowercase ), getattr(expected_dataset_infos['default'] ,__lowercase )
if key == "num_bytes":
assert is_apercent_close(__lowercase ,__lowercase )
elif key == "splits":
assert list(__lowercase ) == list(__lowercase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
assert result == expected
| 192 |
from __future__ import annotations
import requests
_UpperCAmelCase = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def UpperCamelCase ( __lowercase : str ,__lowercase : int = 1 ,__lowercase : str = "new" ,__lowercase : list | None = None ):
'''simple docstring'''
A_ : Tuple = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ):
A_ : int = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(__lowercase )
A_ : Optional[int] = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' ,headers={'User-agent': 'A random string'} ,)
if response.status_code == 4_29:
raise requests.HTTPError
A_ : Optional[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )}
A_ : Union[str, Any] = {}
for id_ in range(__lowercase ):
A_ : List[str] = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, you are being rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 192 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={"vocab_file": "spm_char.model"}
__SCREAMING_SNAKE_CASE ={
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
__SCREAMING_SNAKE_CASE ={
"microsoft/speecht5_asr": 1024,
"microsoft/speecht5_tts": 1024,
"microsoft/speecht5_vc": 1024,
}
class UpperCamelCase ( lowercase_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self ,__UpperCamelCase ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None:
'''simple docstring'''
lowercase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,)
lowercase_ : int = vocab_file
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : int = self.__dict__.copy()
lowercase_ : int = None
return state
def __setstate__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Any = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
lowercase_ : Union[str, Any] = {}
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : str = self.sp_model.IdToPiece(__UpperCamelCase )
return token
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = []
lowercase_ : List[Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCamelCase ) + token
lowercase_ : List[Any] = []
else:
current_sub_tokens.append(__UpperCamelCase )
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase )
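        # A single trailing 1 marks the EOS token appended by build_inputs_with_special_tokens.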
lowercase_ : Optional[Any] = [1]
if token_ids_a is None:
return ([0] * len(__UpperCamelCase )) + suffix_ones
return ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : List[Any] = os.path.join(
__UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase ,'wb' ) as fi:
lowercase_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 213 | """simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
@staticmethod
def _UpperCAmelCase ( *__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
lowercase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : int = pipeline(
'zero-shot-object-detection' ,model='hf-internal-testing/tiny-random-owlvit-object-detection' )
lowercase_ : Optional[int] = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Tuple = object_detector(examples[0] ,threshold=0.0 )
lowercase_ : Any = len(__UpperCamelCase )
self.assertGreater(__UpperCamelCase ,0 )
self.assertEqual(
__UpperCamelCase ,[
{
'score': ANY(__UpperCamelCase ),
'label': ANY(__UpperCamelCase ),
'box': {'xmin': ANY(__UpperCamelCase ), 'ymin': ANY(__UpperCamelCase ), 'xmax': ANY(__UpperCamelCase ), 'ymax': ANY(__UpperCamelCase )},
}
for i in range(__UpperCamelCase )
] ,)
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = pipeline(
'zero-shot-object-detection' ,model='hf-internal-testing/tiny-random-owlvit-object-detection' )
lowercase_ : Optional[int] = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' ,candidate_labels=['cat', 'remote', 'couch'] ,threshold=0.64 ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] ,)
lowercase_ : Union[str, Any] = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] ,threshold=0.64 ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] ,)
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Any = pipeline('zero-shot-object-detection' )
lowercase_ : List[str] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] ,)
lowercase_ : Union[str, Any] = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] ,)
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
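        # Detections scoring below this threshold should be filtered out by the pipeline.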
lowercase_ : Union[str, Any] = 0.2
lowercase_ : Any = pipeline('zero-shot-object-detection' )
lowercase_ : str = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,threshold=__UpperCamelCase ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] ,)
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
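        # Request only the two highest-scoring detections.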
lowercase_ : Tuple = 2
lowercase_ : Optional[Any] = pipeline('zero-shot-object-detection' )
lowercase_ : List[Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,candidate_labels=['cat', 'remote', 'couch'] ,top_k=__UpperCamelCase ,)
self.assertEqual(
nested_simplify(__UpperCamelCase ,decimals=4 ) ,[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] ,)
| 213 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[Any] = '''gpt_neo'''
SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Optional[int] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self ,SCREAMING_SNAKE_CASE__=5_02_57 ,SCREAMING_SNAKE_CASE__=20_48 ,SCREAMING_SNAKE_CASE__=20_48 ,SCREAMING_SNAKE_CASE__=24 ,SCREAMING_SNAKE_CASE__=[[["global", "local"], 12]] ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__="gelu_new" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=1E-5 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=5_02_56 ,SCREAMING_SNAKE_CASE__=5_02_56 ,**SCREAMING_SNAKE_CASE__ ,) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = vocab_size
__SCREAMING_SNAKE_CASE :List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE :int = hidden_size
__SCREAMING_SNAKE_CASE :Optional[int] = num_layers
__SCREAMING_SNAKE_CASE :List[str] = num_heads
__SCREAMING_SNAKE_CASE :Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE :str = window_size
__SCREAMING_SNAKE_CASE :Dict = activation_function
__SCREAMING_SNAKE_CASE :Tuple = resid_dropout
__SCREAMING_SNAKE_CASE :str = embed_dropout
__SCREAMING_SNAKE_CASE :str = attention_dropout
__SCREAMING_SNAKE_CASE :Tuple = classifier_dropout
__SCREAMING_SNAKE_CASE :Tuple = layer_norm_epsilon
__SCREAMING_SNAKE_CASE :List[Any] = initializer_range
__SCREAMING_SNAKE_CASE :Dict = use_cache
__SCREAMING_SNAKE_CASE :Optional[Any] = bos_token_id
__SCREAMING_SNAKE_CASE :List[str] = eos_token_id
__SCREAMING_SNAKE_CASE :List[Any] = attention_types
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.expand_attention_types_params(SCREAMING_SNAKE_CASE__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
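# Traceable stand-in for torch.Tensor.unfold, used when exporting the model to ONNX.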
def __lowerCamelCase ( a_ : List[Any] , a_ : str , a_ : List[str] , a_ : Tuple ) -> List[Any]:
import torch
__SCREAMING_SNAKE_CASE :str = input.size()
__SCREAMING_SNAKE_CASE :int = len(a_ )
__SCREAMING_SNAKE_CASE :Tuple = shape[dimension]
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.arange(0 , a_ , a_ )
__SCREAMING_SNAKE_CASE :int = torch.div(sizedim - size , a_ , rounding_mode='''floor''' ) + 1
__SCREAMING_SNAKE_CASE :Any = torch.arange(a_ ) + low_indices[:min_length][:, None]
__SCREAMING_SNAKE_CASE :List[str] = [slice(a_ )] * rank
__SCREAMING_SNAKE_CASE :List[Any] = indices
__SCREAMING_SNAKE_CASE :str = input[s]
__SCREAMING_SNAKE_CASE :Tuple = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(a_ )
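# Pick the largest divisor of the sequence length among the candidates, plus the resulting block count.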
def __lowerCamelCase ( a_ : str , a_ : Union[str, Any] ) -> Any:
import torch
__SCREAMING_SNAKE_CASE :Dict = torch.arange(1 , a_ )
__SCREAMING_SNAKE_CASE :List[str] = torch.remainder(a_ , a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = remainders == 0
__SCREAMING_SNAKE_CASE :Any = candidates[divisor_indices]
__SCREAMING_SNAKE_CASE :Dict = torch.max(a_ )
return largest_divisor, torch.div(a_ , a_ , rounding_mode='''floor''' )
class _SCREAMING_SNAKE_CASE( A ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction='''inputs''' )
__SCREAMING_SNAKE_CASE :Dict = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self._config.num_heads
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = super(SCREAMING_SNAKE_CASE__ ,self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
        # We need to order the input in the way they appear in the forward()
__SCREAMING_SNAKE_CASE :Optional[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE :Optional[int] = seqlen + 2
__SCREAMING_SNAKE_CASE :Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__SCREAMING_SNAKE_CASE :int = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE :str = common_inputs['''attention_mask''']
if self.use_past:
__SCREAMING_SNAKE_CASE :Tuple = ordered_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE :Dict = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 )
return ordered_inputs
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return 13 | 239 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : str = '''bart'''
SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self ,SCREAMING_SNAKE_CASE__=5_02_65 ,SCREAMING_SNAKE_CASE__=10_24 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=10_24 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=2 ,**SCREAMING_SNAKE_CASE__ ,) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = vocab_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Any = d_model
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :List[str] = encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :Any = decoder_layers
__SCREAMING_SNAKE_CASE :Optional[int] = decoder_attention_heads
__SCREAMING_SNAKE_CASE :Optional[Any] = dropout
__SCREAMING_SNAKE_CASE :Optional[Any] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Union[str, Any] = activation_function
__SCREAMING_SNAKE_CASE :Union[str, Any] = init_std
__SCREAMING_SNAKE_CASE :int = encoder_layerdrop
__SCREAMING_SNAKE_CASE :Any = decoder_layerdrop
__SCREAMING_SNAKE_CASE :str = classifier_dropout
__SCREAMING_SNAKE_CASE :List[str] = use_cache
__SCREAMING_SNAKE_CASE :List[str] = encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=SCREAMING_SNAKE_CASE__ ,pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,forced_eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :str = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
class _SCREAMING_SNAKE_CASE( A ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE :int = {0: '''batch'''}
__SCREAMING_SNAKE_CASE :int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
__SCREAMING_SNAKE_CASE :Any = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE :Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[Any] = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :str = super().outputs
else:
__SCREAMING_SNAKE_CASE :List[str] = super(SCREAMING_SNAKE_CASE__ ,self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :str = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE :Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE :Union[str, Any] = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
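        # Prefix decoder tensor names so they do not collide with the encoder inputs.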
__SCREAMING_SNAKE_CASE :List[str] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE :Any = dict(**SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = common_inputs['''input_ids'''].shape
__SCREAMING_SNAKE_CASE :Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.num_attention_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
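                # Use a longer past length so past and current decoder sequence lengths differ (as in the causal-lm path).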
__SCREAMING_SNAKE_CASE :Optional[int] = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE :Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )] ,dim=1 )
__SCREAMING_SNAKE_CASE :Optional[Any] = []
                # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.num_layers
__SCREAMING_SNAKE_CASE :int = min(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = max(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) - min_num_layers
__SCREAMING_SNAKE_CASE :int = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE :List[str] = seqlen + 2
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.num_layers
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :int = self.num_attention_heads
__SCREAMING_SNAKE_CASE :Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Tuple = common_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 )
__SCREAMING_SNAKE_CASE :str = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
]
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE :Optional[Any] = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE :List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE :str = dict(tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ) )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
elif self.task == "causal-lm":
__SCREAMING_SNAKE_CASE :int = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Dict = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :Dict = super(SCREAMING_SNAKE_CASE__ ,self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) | 239 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
def __init__(self : List[str] , snake_case__ : str , snake_case__ : List[str]=3 , snake_case__ : int=32 , snake_case__ : Optional[int]=3 , snake_case__ : Tuple=10 , snake_case__ : Dict=[10, 20, 30, 40] , snake_case__ : List[Any]=[1, 1, 2, 1] , snake_case__ : Optional[Any]=True , snake_case__ : Dict=True , snake_case__ : Tuple="relu" , snake_case__ : Optional[int]=3 , snake_case__ : Any=None , ) -> int:
'''simple docstring'''
snake_case : Optional[int] = parent
snake_case : List[str] = batch_size
snake_case : Optional[Any] = image_size
snake_case : str = num_channels
snake_case : Dict = embeddings_size
snake_case : List[str] = hidden_sizes
snake_case : List[Any] = depths
snake_case : Union[str, Any] = is_training
snake_case : List[Any] = use_labels
snake_case : Optional[Any] = hidden_act
snake_case : Union[str, Any] = num_labels
snake_case : Union[str, Any] = scope
snake_case : List[str] = len(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Tuple = None
if self.use_labels:
snake_case : Tuple = ids_tensor([self.batch_size] , self.num_labels )
snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Dict ) -> List[str]:
'''simple docstring'''
snake_case : int = TFResNetModel(config=snake_case__ )
snake_case : List[str] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : Optional[int] = self.num_labels
snake_case : Dict = TFResNetForImageClassification(snake_case__ )
snake_case : List[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case : Tuple = config_and_inputs
snake_case : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ):
A__ : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A__ : Optional[Any] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
A__ : List[Any] = False
A__ : Tuple = False
A__ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = TFResNetModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> str:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Dict = model_class(snake_case__ )
snake_case : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Union[str, Any] = [*signature.parameters.keys()]
snake_case : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : List[str] ):
snake_case : Dict = model_class(snake_case__ )
snake_case : Optional[int] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
snake_case : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Tuple = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case : Any = layer_type
snake_case : Tuple = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
                # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
snake_case : str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : str = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
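# Shared fixture that loads the COCO sample image used by the integration tests.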
def UpperCamelCase ( ):
snake_case : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case : List[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case : List[Any] = self.default_image_processor
snake_case : Tuple = prepare_img()
snake_case : Optional[Any] = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
snake_case : Any = model(**snake_case__ )
# verify the logits
snake_case : Any = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , snake_case__ )
snake_case : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 59 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger()
@dataclass
class UpperCAmelCase :
A__ : nn.Module
A__ : List[nn.Module] = field(default_factory=A_ )
A__ : list = field(default_factory=A_ )
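    # Forward hook: record leaf modules (no submodules, or bare Conv2d / BatchNorm2d layers) as they run.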
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Tensor , snake_case__ : Tensor ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case__ )
def __call__(self : List[Any] , snake_case__ : Tensor ) -> List[Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]:
'''simple docstring'''
return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCAmelCase :
A__ : nn.Module
A__ : nn.Module
A__ : int = 1
A__ : List = field(default_factory=A_ )
A__ : List = field(default_factory=A_ )
A__ : bool = True
def __call__(self : List[Any] , snake_case__ : Tensor ) -> Any:
'''simple docstring'''
snake_case : str = Tracker(self.dest )(snake_case__ ).parametrized
snake_case : Optional[int] = Tracker(self.src )(snake_case__ ).parametrized
snake_case : List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) )
snake_case : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while"""
f""" destination module has {len(snake_case__ )}.""" )
for dest_m, src_m in zip(snake_case__ , snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class UpperCAmelCase ( nn.Module ):
def __init__(self : Tuple , snake_case__ : nn.Module ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f"""Unexpected layer name {k}"""
snake_case : Union[str, Any] = len(snake_case__ ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
snake_case : Optional[Any] = nn.ModuleDict(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tensor ) -> Dict:
'''simple docstring'''
return get_trunk_forward_outputs(
snake_case__ , out_feat_keys=snake_case__ , feature_blocks=self._feature_blocks , )
class UpperCAmelCase ( A_ ):
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str ) -> str:
'''simple docstring'''
snake_case : List[Any] = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self : Optional[int] , snake_case__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
if x not in self:
snake_case : Dict = self.convert_name_to_timm(snake_case__ )
snake_case : Union[str, Any] = partial(lambda: (timm.create_model(snake_case__ , pretrained=snake_case__ ).eval(), None) )
else:
snake_case : List[str] = super().__getitem__(snake_case__ )
return val
class UpperCAmelCase ( A_ ):
def __getitem__(self : Dict , snake_case__ : str ) -> Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
snake_case : str = RegNetModel
else:
snake_case : Optional[Any] = RegNetForImageClassification
return val
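# Copy each mapped head weight from the source state dict into the destination state dict.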
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Tuple[str, str]] ):
for from_key, to_key in keys:
snake_case : str = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : Callable[[], nn.Module] , __lowerCamelCase : RegNetConfig , __lowerCamelCase : Path , __lowerCamelCase : bool = True , ):
print(f"""Converting {name}...""" )
with torch.no_grad():
snake_case , snake_case : int = from_model_func()
snake_case : str = our_model_func(__lowerCamelCase ).eval()
snake_case : int = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase , raise_if_mismatch=__lowerCamelCase )
snake_case : Dict = torch.randn((1, 3, 224, 224) )
module_transfer(__lowerCamelCase )
if from_state_dict is not None:
snake_case : str = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
snake_case : Tuple = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
snake_case : Optional[Any] = manually_copy_vissl_head(__lowerCamelCase , our_model.state_dict() , __lowerCamelCase )
our_model.load_state_dict(__lowerCamelCase )
snake_case : Any = our_model(__lowerCamelCase , output_hidden_states=__lowerCamelCase )
snake_case : Union[str, Any] = (
our_outputs.logits if isinstance(__lowerCamelCase , __lowerCamelCase ) else our_outputs.last_hidden_state
)
snake_case : Union[str, Any] = from_model(__lowerCamelCase )
snake_case : Dict = from_output[-1] if type(__lowerCamelCase ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
snake_case : Any = our_outputs.hidden_states[-1]
assert torch.allclose(__lowerCamelCase , __lowerCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__lowerCamelCase , )
snake_case : List[str] = 224 if "seer" not in name else 384
# we can use the convnext one
snake_case : int = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__lowerCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__lowerCamelCase , )
print(f"""Pushed {name}""" )
def UpperCamelCase ( __lowerCamelCase : Path , __lowerCamelCase : str = None , __lowerCamelCase : bool = True ):
snake_case : Union[str, Any] = "imagenet-1k-id2label.json"
snake_case : List[str] = 1000
snake_case : List[str] = (1, num_labels)
snake_case : Any = "huggingface/label-files"
snake_case : List[str] = num_labels
snake_case : Optional[Any] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
snake_case : List[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
snake_case : str = idalabel
snake_case : List[Any] = {v: k for k, v in idalabel.items()}
snake_case : Dict = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
snake_case : Optional[Any] = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
snake_case : Union[str, Any] = NameToOurModelFuncMap()
snake_case : str = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__lowerCamelCase : str , __lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
snake_case : List[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , model_dir=str(__lowerCamelCase ) , map_location="cpu" )
snake_case : Dict = model_func()
# check if we have a head, if yes add it
snake_case : str = files["classy_state_dict"]["base_model"]["model"]
snake_case : Dict = model_state_dict["trunk"]
model.load_state_dict(__lowerCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
snake_case : List[Any] = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : Optional[int] = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : List[str] = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case : Tuple = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
snake_case : List[Any] = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : Tuple = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
snake_case : str = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
snake_case : Dict = partial(
__lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__lowerCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 59 | 1 |
from typing import Any
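# Viterbi algorithm: compute the most probable sequence of hidden states for a sequence of observations.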
def UpperCamelCase ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : dict , __lowerCamelCase : dict , __lowerCamelCase : dict , ):
_validation(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
    # Creates data structures and fills in the initial step
snake_case : dict = {}
snake_case : dict = {}
for state in states_space:
snake_case : int = observations_space[0]
snake_case : Optional[Any] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case : List[str] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__lowerCamelCase ) ):
snake_case : Tuple = observations_space[o]
snake_case : str = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case : Union[str, Any] = ""
snake_case : Dict = -1
for k_state in states_space:
snake_case : Tuple = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case : Tuple = probability
snake_case : int = k_state
# Update probabilities and pointers dicts
snake_case : List[Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case : Any = arg_max
# The final observation
snake_case : Optional[int] = observations_space[len(__lowerCamelCase ) - 1]
# argmax for given final observation
snake_case : Optional[Any] = ""
snake_case : str = -1
for k_state in states_space:
snake_case : Tuple = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case : List[str] = probability
snake_case : Union[str, Any] = k_state
snake_case : List[str] = arg_max
# Process pointers backwards
snake_case : Union[str, Any] = last_state
snake_case : Tuple = []
for o in range(len(__lowerCamelCase ) - 1 , -1 , -1 ):
result.append(__lowerCamelCase )
snake_case : Union[str, Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
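    # Worked example (editorial; a hypothetical two-state illness HMM, values illustrative only):
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    start_p = {"healthy": 0.6, "fever": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # Expected most-likely state sequence: ['healthy', 'healthy', 'fever']
    print(viterbi(observations, states, start_p, trans_p, emit_p))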
| 355 |
def hamming_distance(string_a: str, string_b: str) -> int:
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
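    # Quick check (editorial): "karolin" vs "kathrin" differ in exactly 3 positions.
    print(hamming_distance("karolin", "kathrin"))  # -> 3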
| 10 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class _a ( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any="<s>" , _SCREAMING_SNAKE_CASE : Tuple="</s>" , _SCREAMING_SNAKE_CASE : Optional[int]="</s>" , _SCREAMING_SNAKE_CASE : Optional[Any]="<s>" , _SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , _SCREAMING_SNAKE_CASE : List[Any]="<pad>" , _SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE : str , )-> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
lowerCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : int = vocab_file
lowerCAmelCase__ : List[Any] = monolingual_vocab_file
lowerCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase__ : Any = {}
lowerCAmelCase__ : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase__ : Tuple = cnt
cnt += 1
with open(_SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowerCAmelCase__ : Any = line.strip().split()[0]
lowerCAmelCase__ : Union[str, Any] = len(self.fairseq_tokens_to_ids )
if str(_SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase__ : Any = len(self.fairseq_tokens_to_ids )
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str )-> Optional[int]:
lowerCAmelCase__ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase__ : int = None
lowerCAmelCase__ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] )-> Dict:
lowerCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : Dict = {}
lowerCAmelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
lowerCAmelCase__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None , _SCREAMING_SNAKE_CASE : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__( self : Union[str, Any] )-> Optional[Any]:
return len(self.fairseq_ids_to_tokens )
def UpperCAmelCase__( self : str )-> Optional[Any]:
lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str )-> List[str]:
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Union[str, Any] )-> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Dict )-> Any:
return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : str = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'{str(_SCREAMING_SNAKE_CASE )} \n' )
return out_vocab_file, out_monolingual_vocab_file
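# Usage sketch (editorial; requires the sentencepiece model and dict.txt shipped with the checkpoint named above):
# tokenizer = _a("sentencepiece.bpe.model", "dict.txt")
# print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))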
| 131 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _a ( unittest.TestCase):
def UpperCAmelCase__( self : Tuple )-> int:
lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ : Tuple = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowerCAmelCase__ : Any = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
lowerCAmelCase__ : Dict = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
lowerCAmelCase__ : Any = {'''unk_token''': '''<unk>'''}
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : Union[str, Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , **_SCREAMING_SNAKE_CASE : Any )-> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Tuple , **_SCREAMING_SNAKE_CASE : List[str] )-> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , **_SCREAMING_SNAKE_CASE : Optional[Any] )-> Optional[Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : str )-> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__( self : Dict )-> Any:
lowerCAmelCase__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__( self : int )-> List[str]:
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase__ : Tuple = self.get_image_processor()
lowerCAmelCase__ : Tuple = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[int] )-> Optional[int]:
lowerCAmelCase__ : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCAmelCase__ : Union[str, Any] = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
lowerCAmelCase__ : List[Any] = self.get_image_processor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : Optional[Any] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = self.prepare_image_inputs()
lowerCAmelCase__ : List[Any] = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowerCAmelCase__ : Optional[int] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__( self : Any )-> Tuple:
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : List[str] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = '''lower newer'''
lowerCAmelCase__ : List[Any] = processor(text=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowerCAmelCase__ : Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase__( self : Union[str, Any] )-> Tuple:
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : List[str] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = '''lower newer'''
lowerCAmelCase__ : str = self.prepare_image_inputs()
lowerCAmelCase__ : Tuple = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
lowerCAmelCase__ : List[Any] = '''google/owlvit-base-patch32'''
lowerCAmelCase__ : str = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = ['''cat''', '''nasa badge''']
lowerCAmelCase__ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def UpperCAmelCase__( self : Any )-> Tuple:
lowerCAmelCase__ : str = '''google/owlvit-base-patch32'''
lowerCAmelCase__ : int = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = [['''cat''', '''nasa badge'''], ['''person''']]
lowerCAmelCase__ : Union[str, Any] = processor(text=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = 16
lowerCAmelCase__ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = max([len(_SCREAMING_SNAKE_CASE ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def UpperCAmelCase__( self : List[str] )-> str:
lowerCAmelCase__ : Dict = '''google/owlvit-base-patch32'''
lowerCAmelCase__ : Union[str, Any] = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = ['''cat''', '''nasa badge''']
lowerCAmelCase__ : Union[str, Any] = processor(text=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = 16
lowerCAmelCase__ : Optional[int] = inputs['''input_ids''']
lowerCAmelCase__ : str = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase__( self : Optional[Any] )-> List[str]:
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : str = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = self.prepare_image_inputs()
lowerCAmelCase__ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase__ : List[Any] = processor(images=_SCREAMING_SNAKE_CASE , query_images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def UpperCAmelCase__( self : int )-> Dict:
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : Tuple = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ : Optional[Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 131 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def _lowerCamelCase( objs=OBJECTS , attrs=ATTRIBUTES ):
    # Reads the Visual Genome object and attribute label files into two string lists.
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def _lowerCamelCase( a ):
    # Loads a pickled Detectron-style checkpoint and converts every array to a torch tensor.
    r = OrderedDict()
    with open(a , "rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class snake_case__ :
    _pointer = {}

    def __init__( self , dictionary , name="root" , level=0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split("." )[-1]] = val
        levels = key.split("." )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , ".".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ):
        return self._pointer

    def dump_yaml( self , data , file_name ):
        with open(f"{file_name}" , "w" ) as stream:
            dump(data , stream )

    def dump_json( self , data , file_name ):
        with open(f"{file_name}" , "w" ) as stream:
            json.dump(data , stream )

    @staticmethod
    def load_yaml( config ):
        with open(config ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v ).__name__})\n"
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )

    @classmethod
    def get_config_dict( cls , pretrained_model_name_or_path , **kwargs ):
        cache_dir = kwargs.pop("cache_dir" , None )
        force_download = kwargs.pop("force_download" , False )
        resume_download = kwargs.pop("resume_download" , False )
        proxies = kwargs.pop("proxies" , None )
        local_files_only = kwargs.pop("local_files_only" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict to make sure the file parses
            if resolved_config_file is None:
                raise EnvironmentError
            Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(resolved_config_file ), kwargs
def _lowerCamelCase( in_tensor ):
    # Debug helper: compares a tensor against a previously dumped reference.
    out_tensor = torch.load("dump.pt" , map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape , n1[0, 0, :5] )
    print(n2.shape , n2[0, 0, :5] )
    assert np.allclose(n1 , n2 , rtol=0.01 , atol=0.1 ), (
        f"{sum([1 for x in np.isclose(n1 , n2 , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(n2.flatten() )*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")


def hf_bucket_url( model_id , filename , use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
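# Example (editorial): hf_bucket_url("bert-base-uncased", "config.json")
# -> "https://cdn.huggingface.co/bert-base-uncased-config.json" (legacy flat layout, since the id has no "/")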
def _lowerCamelCase( a , a , a=None , a=0 , a=None , ):
__a = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(a , a ):
ua += "; " + "; ".join("{}/{}".format(a , a ) for k, v in user_agent.items() )
elif isinstance(a , a ):
ua += "; " + user_agent
__a = {"user-agent": ua}
if resume_size > 0:
__a = "bytes=%d-" % (resume_size,)
__a = requests.get(a , stream=a , proxies=a , headers=a )
if response.status_code == 4_1_6: # Range not satisfiable
return
__a = response.headers.get("Content-Length" )
__a = resume_size + int(a ) if content_length is not None else None
__a = tqdm(
unit="B" , unit_scale=a , total=a , initial=a , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=1_0_2_4 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(a ) )
temp_file.write(a )
progress.close()
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , "a+b" ) as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" % (url , temp_file.name) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path , "w" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url , etag=None ):
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
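# Example (editorial): url_to_filename("https://example.com/weights.h5") returns the sha256
# hex digest of the URL, with ".h5" re-appended so the file type survives hashing.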
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace("." , "-" ) + "-extracted"
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , "r" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
        return output_path_extracted
    return output_path
def _lowerCamelCase( a , a="," ):
assert isinstance(a , a )
if os.path.isfile(a ):
with open(a ) as f:
__a = eval(f.read() )
else:
__a = requests.get(a )
try:
__a = requests.json()
except Exception:
__a = req.content.decode()
assert data is not None, "could not connect"
try:
__a = eval(a )
except Exception:
__a = data.split("\n" )
req.close()
return data
def get_image_from_url( a ):
    response = requests.get(a )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def _lowerCamelCase( url ):
    # Downloads (if needed) and converts a pickled FRCNN checkpoint to torch tensors,
    # adding the `num_batches_tracked` buffers that batch-norm layers expect.
    fn = url.split("/" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , "rb" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("model" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("running_var" , "num_batches_tracked" )
            new[k2] = zero
    return new
def _lowerCamelCase( ):
    print(f"{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb" )
def _lowerCamelCase( a , a="RGB" ):
assert isinstance(a , a )
if os.path.isfile(a ):
__a = cva.imread(a )
else:
__a = get_image_from_url(a )
assert img is not None, F"could not connect to: {im}"
__a = cva.cvtColor(a , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__a = img[:, :, ::-1]
return img
def _lowerCamelCase( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
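# Usage sketch (editorial; the checkpoint id is illustrative, treat it as an assumption):
# cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")  # resolves + caches config.yaml via cached_path()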
| 268 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class snake_case__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        return len(self.vocab )

    def get_vocab( self ):
        return dict(self.vocab , **self.added_tokens_encoder )

    def _tokenize( self , text ):
        # Character-level tokenization: "abc" -> ["a", "b", "c"].
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens

    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        return (vocab_file,)
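# Usage sketch (editorial; the vocab path is illustrative):
# tokenizer = snake_case__("vocab.json")
# print(tokenizer._tokenize("hello"))  # -> ['h', 'e', 'l', 'l', 'o']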
| 268 | 1 |
import re
from ..utils import cached_file
# docstyle-ignore
_a = '''\nHuman: <<task>>\n\nAssistant: '''
_a = '''huggingface-tools/default-prompts'''
_a = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def download_prompt( prompt_or_repo_id , agent_name , mode="run" ):
    """simple docstring"""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
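# Usage sketch (editorial; the agent name is illustrative):
# chat_template = download_prompt(None, "my-agent", mode="chat")  # fetches chat_prompt_template.txt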
| 39 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env (env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default


def parse_flag_from_env (key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env (key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
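# Usage sketch (editorial; the variable name is illustrative):
# os.environ["MY_FLAG"] = "1"
# assert parse_flag_from_env("MY_FLAG") is True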
| 34 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase : Optional[Any] = logging.get_logger(__name__)
class __snake_case ( ChineseCLIPImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ChineseCLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 285 |
from ...processing_utils import ProcessorMixin
class __snake_case ( ProcessorMixin ):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )

    def __call__( self , *args , **kwargs ):
        # While inside a target-processor context, forward everything to the current processor.
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    def get_prompt_ids( self , text , return_tensors="np" ):
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
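# Usage sketch (editorial; the checkpoint id and audio array are illustrative):
# from transformers import WhisperProcessor
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")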
| 285 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE :Union[str, Any] = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :int = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[str] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[Any] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Optional[int] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[str] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve( num ):
    '''simple docstring'''
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
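# Worked example (editorial): prime_sieve(10) -> [2, 3, 5, 7]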
if __name__ == "__main__":
    print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 206 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = KandinskyVaaControlnetPipeline
UpperCAmelCase_ = ['image_embeds', 'negative_image_embeds', 'hint']
UpperCAmelCase_ = ['image_embeds', 'negative_image_embeds', 'hint']
UpperCAmelCase_ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCAmelCase_ = False
@property
def __snake_case ( self : Optional[int]):
'''simple docstring'''
return 32
@property
def __snake_case ( self : str):
'''simple docstring'''
return 32
@property
def __snake_case ( self : str):
'''simple docstring'''
return self.time_input_dim
@property
def __snake_case ( self : Tuple):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __snake_case ( self : str):
'''simple docstring'''
return 100
@property
def __snake_case ( self : List[str]):
'''simple docstring'''
torch.manual_seed(0)
lowerCAmelCase__ = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ = UNetaDConditionModel(**lowercase__)
return model
@property
def __snake_case ( self : Optional[int]):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
torch.manual_seed(0)
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs)
return model
def __snake_case ( self : Tuple):
'''simple docstring'''
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase__ , )
lowerCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __snake_case ( self : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Dict=0):
'''simple docstring'''
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__)).to(lowercase__)
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase__)
# create hint
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase__)).to(lowercase__)
if str(lowercase__).startswith('mps'):
lowerCAmelCase__ = torch.manual_seed(lowercase__)
else:
lowerCAmelCase__ = torch.Generator(device=lowercase__).manual_seed(lowercase__)
lowerCAmelCase__ = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __snake_case ( self : int):
'''simple docstring'''
lowerCAmelCase__ = 'cpu'
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**lowercase__)
lowerCAmelCase__ = pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(lowercase__))
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(lowercase__) , return_dict=lowercase__ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Dict):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
lowerCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png')
lowerCAmelCase__ = torch.from_numpy(np.array(lowercase__)).float() / 255.0
lowerCAmelCase__ = hint.permute(2 , 0 , 1).unsqueeze(0)
lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase__)
lowerCAmelCase__ = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa)
lowerCAmelCase__ = pipeline.to(lowercase__)
pipeline.set_progress_bar_config(disable=lowercase__)
lowerCAmelCase__ = 'A robot, 4k photo'
lowerCAmelCase__ = torch.Generator(device='cuda').manual_seed(0)
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ = torch.Generator(device='cuda').manual_seed(0)
lowerCAmelCase__ = pipeline(
image_embeds=lowercase__ , negative_image_embeds=lowercase__ , hint=lowercase__ , generator=lowercase__ , num_inference_steps=100 , output_type='np' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__)
| 362 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( model_doc ):
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )
def check_model_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
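# Usage sketch (editorial; the script path is illustrative): python check_doc_toc.py --fix_and_overwrite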
| 119 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' ,['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' ,['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' ,[None, 'v2'] )
def test_hf_hub_url(repo_id , path , revision ) -> None:
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
| 71 |
'''simple docstring'''
import os
def solution():
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__ ) , "num.txt" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
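# Editorial note: this appears to be the classic "large sum" task (Project Euler 13) —
# sum every number in num.txt and keep only the first ten digits of the total.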
if __name__ == "__main__":
print(solution())
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum( nums: list[int] ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
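# Worked example (editorial): for [2, 7, 9, 3, 1] the best non-adjacent pick is 2 + 9 + 1 = 12,
# so maximum_non_adjacent_sum([2, 7, 9, 3, 1]) returns 12.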
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url ):
_UpperCamelCase : str = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_UpperCamelCase : Any = 1_0_2_4
_UpperCamelCase : List[Any] = 4_0_9_6
_UpperCamelCase : List[str] = 2_4
_UpperCamelCase : Tuple = 1_6
_UpperCamelCase : Union[str, Any] = [5, 1_1, 1_7, 2_3]
_UpperCamelCase : Any = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
_UpperCamelCase : Tuple = (1, 3_8_4, 3_8_4)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
_UpperCamelCase : Optional[int] = 7_6_8
_UpperCamelCase : Optional[Any] = [1, 1, 1, 0.5]
_UpperCamelCase : List[Any] = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
_UpperCamelCase : Optional[int] = 1_5_0
_UpperCamelCase : Tuple = 1_6
_UpperCamelCase : Dict = (1, 3_8_4, 3_8_4)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = 'project'
if "ade" in checkpoint_url:
_UpperCamelCase : Dict = True
_UpperCamelCase : Dict = 7_6_8
_UpperCamelCase : Union[str, Any] = [1, 1, 1, 0.5]
_UpperCamelCase : Union[str, Any] = 1_5_0
_UpperCamelCase : str = 1_6
_UpperCamelCase : Tuple = 'huggingface/label-files'
_UpperCamelCase : Tuple = 'ade20k-id2label.json'
_UpperCamelCase : Tuple = json.load(open(cached_download(hf_hub_url(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) ) , 'r' ) )
_UpperCamelCase : str = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : List[str] = idalabel
_UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
_UpperCamelCase : int = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key(name ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_UpperCamelCase : List[str] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_UpperCamelCase : int = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_UpperCamelCase : Any = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_UpperCamelCase : Tuple = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_UpperCamelCase : List[str] = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_UpperCamelCase : int = name.replace('proj' , 'projection' )
if "blocks" in name:
_UpperCamelCase : List[str] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_UpperCamelCase : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_UpperCamelCase : str = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_UpperCamelCase : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_UpperCamelCase : str = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_UpperCamelCase : Dict = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_UpperCamelCase : List[str] = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_UpperCamelCase : List[str] = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_UpperCamelCase : Any = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_UpperCamelCase : int = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_UpperCamelCase : Dict = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_UpperCamelCase : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_UpperCamelCase : str = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
_UpperCamelCase : Dict = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_UpperCamelCase : Union[str, Any] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_UpperCamelCase : Union[str, Any] = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_UpperCamelCase : int = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_UpperCamelCase : Dict = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCamelCase : str = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCamelCase : Optional[int] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCamelCase : Optional[int] = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_UpperCamelCase : List[Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_UpperCamelCase : List[Any] = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_UpperCamelCase : Dict = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_UpperCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_UpperCamelCase : int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_UpperCamelCase : Union[str, Any] = name.replace('bn' , 'batch_norm' )
if "head" in name:
_UpperCamelCase : Dict = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_UpperCamelCase : str = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_UpperCamelCase : Any = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_UpperCamelCase : List[Any] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_UpperCamelCase : Dict = name.replace('..' , '.' )
if "stem.conv" in name:
_UpperCamelCase : Tuple = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_UpperCamelCase : Optional[int] = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_UpperCamelCase : List[str] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_UpperCamelCase : Union[str, Any] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_UpperCamelCase : Dict = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_UpperCamelCase : str = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_UpperCamelCase : Tuple = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Check outputs on an image
    size = 4_8_0 if 'ade' in checkpoint_url else 3_8_4
    image_processor = DPTImageProcessor(size=size )

    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )

    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'Saving model to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236 | 0 |
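The fused-QKV split performed by `read_in_q_k_v` above can be illustrated in isolation; the shapes here are hypothetical:

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # timm stores q, k, v fused

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

# The three slices reassemble exactly into the original fused matrix.
assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)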
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A__ ( A__ ):
def A ( self : Any ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =5
# Realm tok
_SCREAMING_SNAKE_CASE =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(_a , exist_ok=_a )
def A ( self : Union[str, Any] ) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def A ( self : int ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =RealmConfig(num_block_records=self.num_block_records )
return config
def A ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def A ( self : int ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=_a , )
return block_records
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def A ( self : str ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_config()
_SCREAMING_SNAKE_CASE =self.get_dummy_retriever()
_SCREAMING_SNAKE_CASE =retriever.tokenizer
_SCREAMING_SNAKE_CASE =np.array([0, 3] , dtype='long' )
_SCREAMING_SNAKE_CASE =tokenizer(['Test question'] ).input_ids
_SCREAMING_SNAKE_CASE =tokenizer(
['the fourth'] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_SCREAMING_SNAKE_CASE =config.reader_seq_len
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='np' )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def A ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_config()
_SCREAMING_SNAKE_CASE =self.get_dummy_retriever()
_SCREAMING_SNAKE_CASE =retriever.tokenizer
_SCREAMING_SNAKE_CASE =np.array([0, 3, 5] , dtype='long' )
_SCREAMING_SNAKE_CASE =tokenizer(['Test question'] ).input_ids
_SCREAMING_SNAKE_CASE =tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids
_SCREAMING_SNAKE_CASE =config.reader_seq_len
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever(
_a , _a , answer_ids=_a , max_length=_a , return_tensors='np' )
self.assertEqual([False, True, True] , _a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a )
def A ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
_SCREAMING_SNAKE_CASE =retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
_SCREAMING_SNAKE_CASE =os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
_SCREAMING_SNAKE_CASE =RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 47 |
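The core lookup the retriever test above exercises is simple: retrieved block ids index into a numpy object array of evidence byte strings. A minimal sketch (values hypothetical):

import numpy as np

block_records = np.array([b"first record", b"second record", b"third record"], dtype=object)
retrieved_block_ids = np.array([0, 2], dtype=np.int64)
print([block_records[i] for i in retrieved_block_ids])  # [b'first record', b'third record']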
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files.'} )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__(self ):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: Optional[str] = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
    norm_pix_loss: bool = field(
        default=True , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )


@dataclass
class CustomTrainingArguments(TrainingArguments ):
    base_learning_rate: float = field(
        default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def collate_fn(examples ):
    """Stack the pixel values of the examples into a single training batch."""
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}" )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(f"New config: {config}" )

    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        } )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    transforms = Compose(
        [
            Lambda(lambda img : img.convert('RGB' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(size , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    def preprocess_images(examples ):
        examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )

    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 47 | 1 |
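The linear learning-rate scaling used in the script above, as a standalone helper (argument values hypothetical):

def absolute_lr(base_lr, per_device_batch_size, gradient_accumulation_steps, world_size):
    total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size
    return base_lr * total_train_batch_size / 256

print(absolute_lr(1e-3, 32, 2, 4))  # total batch 256 -> 0.001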
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__( self , *args , **kwargs ):
        # For backward compatibility with `as_target_processor`.
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        images = kwargs.pop("images" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )

        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        """Convert a sequence of <s_key>...</s_key> tokens into a (possibly nested) dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(R"<s_(.*?)>" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )

            end_token = re.search(Rf"""</s_{key}>""" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , "" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R"<sep/>" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=is_inner_value , added_vocab=added_vocab )

        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 367 |
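A tiny regex sketch of the token-to-JSON idea behind `tokenajson` above, covering only the flat leaf case:

import re

tokens = "<s_menu>latte<sep/>espresso</s_menu>"
match = re.search(r"<s_(.*?)>(.*?)</s_\1>", tokens)
if match:
    key, content = match.group(1), match.group(2)
    print({key: [v.strip() for v in content.split("<sep/>")]})
# {'menu': ['latte', 'espresso']}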
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.31_4462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79 | 0 |
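A quick numeric check of the helpers above: one mole at 300 K in 0.0248 m^3 sits close to one atmosphere.

print(pressure_of_gas_system(1.0, 300.0, 0.0248))  # ~100578 Pa, roughly 1 atm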
'''simple docstring'''
def solution(min_total: int = 10**12 ) -> int:
    # Walk the Pell-like recurrence until the total number of discs
    # exceeds min_total; the answer is the matching number of blue discs.
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83 |
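Tracing the recurrence above step by step reproduces the known exact-half arrangements (blue discs, total discs):

prev_numerator, prev_denominator, numerator, denominator = 1, 0, 1, 1
for _ in range(3):
    prev_numerator += 2 * numerator
    numerator += 2 * prev_numerator
    prev_denominator += 2 * denominator
    denominator += 2 * prev_denominator
    print(((denominator + 1) // 2, (numerator + 1) // 2))
# (3, 4), (15, 21), (85, 120): e.g. 15/21 * 14/20 == 1/2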
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset , length ):
    for i in range(length ):
        example = dataset[i]


@get_duration
def read_batch(dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        batch = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            example = dataset[i]


@get_duration
def read_formatted_batch(dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            batch = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset' )
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
        print('first set of iterations' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )

        print('shuffling dataset' )
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)' )
        for func, kwargs in functions_shuffled:
            print('shuffled ' , func.__name__ , str(kwargs ) )
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )

    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 28 | 0 |
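`get_duration` is imported from the benchmark's local `utils` module and is not shown here; a plausible stand-in (behavior assumed, not the actual implementation) would be:

import functools
import timeit

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # elapsed seconds
    return wrapper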
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int , absent: int , late: int ) -> int:
    # being absent twice or late three consecutive days forfeits the prize
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )

    prizestrings = state_late + state_absent + state_ontime

    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30 ) -> int:
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 191 |
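Spot check for the memoized counter above: Project Euler 191 states there are exactly forty-three prize strings over a 4-day period.

print(solution(4))  # 43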
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , parent: Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )

        self.node_queue = [self.start]
        self.reached = False

    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )

            successors = self.get_successors(current_node )

            for node in successors:
                self.node_queue.append(node )

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors( self , parent: Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors

    def retrace_path( self , node: Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__( self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False

    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path( self , fwd_node: Node , bwd_node: Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('''Unidirectional BFS computation time : ''', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 191 | 1 |
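One efficiency note on the queues above: `list.pop(0)` is O(n) because every remaining element shifts left, while `collections.deque` gives O(1) left pops and is a near drop-in replacement for the `node_queue` lists.

from collections import deque

node_queue = deque([(0, 0), (0, 1), (1, 0)])
current = node_queue.popleft()  # O(1), unlike list.pop(0)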
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64] ,
    constant_matrix: NDArray[float64] ,
    init_val: list[float] ,
    iterations: int ,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 160 |
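A hypothetical run of the solver above on a strictly diagonally dominant 2x2 system (4x + y = 2, x + 3y = -6; exact solution x = 12/11, y = -26/11):

coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5], iterations=25))
# converges toward [1.0909..., -2.3636...]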
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=3 ,
        image_size=32 ,
        num_channels=3 ,
        embeddings_size=10 ,
        hidden_sizes=[10, 20, 30, 40] ,
        depths=[1, 1, 2, 1] ,
        is_training=True ,
        use_labels=True ,
        hidden_act="relu" ,
        num_labels=3 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
    def setUp( self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ResNetConfig ,has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) ,expected_num_stages + 1 )

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict ,config ,model_class )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict ,config ,model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
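# Usage sketch (not part of the original test file) of the inference flow the integration
# test above exercises. The "microsoft/resnet-50" checkpoint and network access are
# assumptions, not taken from this file:
#
#   from PIL import Image
#   import tensorflow as tf
#   from transformers import AutoImageProcessor, TFResNetForImageClassification
#
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   logits = model(**processor(images=image, return_tensors="tf")).logits
#   print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])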
| 16 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates train/validation `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
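# Usage sketch: this script is meant to be launched through `accelerate`; a typical
# invocation (config file and paths below are hypothetical) looks like:
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./checkpoints --num_epochs 2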
| 132 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = ModelForTest()
_snake_case : List[str] = ModelHook()
add_hook_to_module(a_, a_ )
self.assertEqual(test_model._hf_hook, a_ )
self.assertTrue(hasattr(a_, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_, """_hf_hook""" ) )
self.assertFalse(hasattr(a_, """_old_forward""" ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ModelForTest()
_snake_case : List[Any] = ModelHook()
add_hook_to_module(a_, a_ )
add_hook_to_module(a_, a_, append=a_ )
self.assertEqual(isinstance(test_model._hf_hook, a_ ), a_ )
self.assertEqual(len(test_model._hf_hook.hooks ), 2 )
self.assertTrue(hasattr(a_, """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__, """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
remove_hook_from_module(a_ )
self.assertFalse(hasattr(a_, """_hf_hook""" ) )
self.assertFalse(hasattr(a_, """_old_forward""" ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = ModelForTest()
_snake_case : Optional[Any] = torch.randn(2, 3 )
_snake_case : List[Any] = test_model(x + 1 )
_snake_case : List[str] = test_model(x + 2 )
_snake_case : Any = PreForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : List[Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case : List[str] = PreForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Tuple = test_model(a_ )
self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case : str = SequentialHook(PreForwardHook(), PreForwardHook() )
add_hook_to_module(a_, a_ )
_snake_case : str = test_model(a_ )
assert torch.allclose(a_, a_, atol=1E-5 )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = ModelForTest()
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : List[str] = test_model(a_ )
_snake_case : List[Any] = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Union[str, Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1, atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_snake_case : Tuple = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Optional[Any] = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1, atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_snake_case : Dict = SequentialHook(PostForwardHook(), PostForwardHook() )
add_hook_to_module(a_, a_ )
_snake_case : List[str] = test_model(a_ )
assert torch.allclose(a_, output + 2, atol=1E-5 )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : str = ModelForTest()
_snake_case : Any = torch.randn(2, 3 )
_snake_case : List[str] = test_model(a_ )
_snake_case : List[Any] = PostForwardHook()
add_hook_to_module(a_, a_ )
_snake_case : Dict = test_model(a_ )
self.assertTrue(torch.allclose(a_, output + 1 ) )
self.assertTrue(outputa.requires_grad )
_snake_case : Union[str, Any] = True
_snake_case : Dict = test_model(a_ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) )
self.assertEqual(model.lineara.weight.device, torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_snake_case : Any = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a_, AlignDevicesHook(io_same_device=a_ ) )
_snake_case : int = torch.randn(2, 3 ).to(0 )
_snake_case : Optional[Any] = model(a_ )
self.assertEqual(output.device, torch.device(0 ) )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Optional[int] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Union[str, Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : str = torch.randn(2, 3 )
_snake_case : Optional[int] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_snake_case : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**a_ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**a_ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(a_, execution_device=a_, offload=a_ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Optional[int] = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : List[Any] = torch.randn(2, 3 )
_snake_case : List[Any] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a_, execution_device=a_, offload=a_, offload_buffers=a_ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : str = torch.randn(2, 3 )
_snake_case : List[Any] = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
_snake_case : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
a_, execution_device=a_, offload=a_, weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_snake_case : Optional[Any] = torch.device(a_ )
self.assertEqual(model.batchnorm.running_mean.device, a_ )
_snake_case : int = torch.randn(2, 3 )
_snake_case : str = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a_, execution_device=a_, offload=a_, weights_map=model.state_dict(), offload_buffers=a_, )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
_snake_case : Optional[int] = torch.randn(2, 3 )
_snake_case : Any = model(a_ )
self.assertEqual(output.device, a_ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a_ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
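# Usage sketch (not part of the original test file) of the hook API exercised above; it
# reuses the imports at the top of this file. `pre_forward` rewrites the inputs before the
# wrapped forward runs, so the call below effectively computes layer(x + 1).
class AddOneToInput(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


example_layer = add_hook_to_module(nn.Linear(3, 3), AddOneToInput())
example_output = example_layer(torch.randn(2, 3))  # same as nn.Linear(3, 3)(x + 1)
remove_hook_from_module(example_layer)             # restores the original forward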
| 132 | 1 |
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    """Sorts `arr` in place with stooge sort and returns it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
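    # A quick sanity check of the implementation above:
    #   >>> stooge_sort([2, 4, 5, 3, 1])
    #   [1, 2, 3, 4, 5]
    # Stooge sort is a teaching algorithm, not a practical one: sorting the first 2/3,
    # the last 2/3, then the first 2/3 again runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71).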
| 84 |
def solution(n: int = 4_00_00_00) -> int:
    """Sums the even-valued Fibonacci terms that do not exceed `n` (Project Euler #2)."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
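# An equivalent constant-memory variant (a sketch, not from the original file) that walks
# the Fibonacci sequence pairwise instead of materialising a list:
def solution_iterative(n: int = 4_00_00_00) -> int:
    a, b = 0, 1
    total = 0
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total


assert solution_iterative() == solution() == 4_613_732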
| 127 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
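# The same behaviour outside the test harness (a sketch): with the toy vocab above, CTRL's
# BPE splits unseen words into "@@"-continued pieces, e.g.
#   tokenizer.tokenize("react")  ->  ['re@@', 'a@@', 'c@@', 't']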
| 51 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
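# Usage sketch (assumes the public `transformers.ViTConfig`) of how the fields above
# interact: a 224x224 image cut into 16x16 patches yields 14 * 14 = 196 patch tokens,
# plus one [CLS] token, entering the encoder.
#   config = ViTConfig(image_size=224, patch_size=16)
#   num_patches = (config.image_size // config.patch_size) ** 2
#   print(num_patches + 1)  # 197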
| 51 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
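# Usage sketch (not from the original file; all paths are hypothetical):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#       --config_file ./lxmert_tf/config.json \
#       --pytorch_dump_path ./lxmert_pytorch/pytorch_model.bin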
| 72 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
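# Usage sketch (not part of the test file) of the processor in end-to-end scene-text
# recognition. The "alibaba-damo/mgp-str-base" checkpoint, the example image URL, and
# network access are all assumptions:
#
#   import requests
#   from PIL import Image
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   image = Image.open(requests.get("https://i.postimg.cc/ZKwLg2Gw/367-14.png", stream=True).raw).convert("RGB")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   print(processor.batch_decode(outputs.logits)["generated_text"])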
| 272 | 0 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates "unit" rows,
    # then subtract to cancel the leading column and recurse on the submatrix.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    # Gaussian-elimination style solver for n linear equations in n unknowns, each
    # given as a list of n coefficients followed by the constant term.
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
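    # A small worked case. For x + 2y = 5 and 3x + 4y = 11 the solver normalises,
    # cancels the first column, then back-substitutes:
    #   >>> solve_simultaneous([[1, 2, 5], [3, 4, 11]])
    #   [1.0, 2.0]
    print(solve_simultaneous([[1, 2, 5], [3, 4, 11]]))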
| 361 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''table-transformer'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Optional[Any]=100 , lowerCAmelCase_ : Optional[int]=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : Any=1.0 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Dict="sine" , lowerCAmelCase_ : Optional[Any]="resnet50" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : List[Any]=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : str = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : Optional[Any] = num_queries
UpperCAmelCase_ : List[str] = d_model
UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = encoder_layers
UpperCAmelCase_ : List[str] = encoder_attention_heads
UpperCAmelCase_ : int = decoder_ffn_dim
UpperCAmelCase_ : int = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[str] = dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : Union[str, Any] = activation_dropout
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : int = init_std
UpperCAmelCase_ : Any = init_xavier_std
UpperCAmelCase_ : Union[str, Any] = encoder_layerdrop
UpperCAmelCase_ : Dict = decoder_layerdrop
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : List[str] = position_embedding_type
UpperCAmelCase_ : Dict = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Tuple = dilation
# Hungarian matcher
UpperCAmelCase_ : Optional[Any] = class_cost
UpperCAmelCase_ : List[Any] = bbox_cost
UpperCAmelCase_ : Optional[int] = giou_cost
# Loss coefficients
UpperCAmelCase_ : Optional[int] = mask_loss_coefficient
UpperCAmelCase_ : List[str] = dice_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : Dict = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.d_model
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 12
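# Usage sketch (assumes the public `transformers` model classes) of how this config is
# consumed; the attribute map above is why `num_attention_heads` resolves to
# `encoder_attention_heads`:
#   from transformers import TableTransformerConfig, TableTransformerForObjectDetection
#
#   config = TableTransformerConfig(num_queries=50, d_model=256)
#   model = TableTransformerForObjectDetection(config)  # randomly initialised detector
#   print(config.num_attention_heads)  # 8 -- read through the attribute map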
| 253 | 0 |
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head (a single linear layer) on top of a language model."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
| 8 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : str = ['input_ids', 'attention_mask']
__lowerCamelCase : Union[str, Any] = RobertaTokenizer
def __init__(self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> List[str]:
"""simple docstring"""
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , A ) != add_prefix_space:
_a = getattr(A , pre_tok_state.pop('''type''' ) )
_a = add_prefix_space
_a = pre_tok_class(**A )
_a = add_prefix_space
_a = '''post_processor'''
_a = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state['''sep'''] )
if "cls" in state:
_a = tuple(state['''cls'''] )
_a = False
if state.get('''add_prefix_space''' , A ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get('''trim_offsets''' , A ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(A , state.pop('''type''' ) )
_a = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
def a__ (self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def a__ (self , A ) -> Optional[Any]:
"""simple docstring"""
_a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
_a = value
def a__ (self , *A , **A ) -> BatchEncoding:
"""simple docstring"""
_a = kwargs.get('''is_split_into_words''' , A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A , **A )
def a__ (self , *A , **A ) -> BatchEncoding:
"""simple docstring"""
_a = kwargs.get('''is_split_into_words''' , A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A , **A )
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
_a = self._tokenizer.model.save(A , name=A )
return tuple(A )
def a__ (self , A , A=None ) -> List[str]:
"""simple docstring"""
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ (self , A , A = None ) -> List[int]:
"""simple docstring"""
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
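# Usage sketch (assumes network access and the public "roberta-base" checkpoint).
# `add_prefix_space=True` is required before feeding pretokenized input, which is exactly
# what the asserts in `_batch_encode_plus` / `_encode_plus` above enforce:
#   from transformers import RobertaTokenizerFast
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
#   ids = tok(["Hello", "world"], is_split_into_words=True)["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))  # ['<s>', 'ĠHello', 'Ġworld', '</s>']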
| 211 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
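# A sketch of what the `_LazyModule` indirection above buys: importing this package is
# cheap, because the heavy `modeling_*` file is only loaded when one of its attributes
# is first touched:
#   import transformers.models.gpt_neox_japanese as gnj   # no torch-level work yet
#   model_cls = gnj.GPTNeoXJapaneseForCausalLM            # triggers the real import lazily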
| 371 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar("T")
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = []
_a = {}
_a = 0
def __len__( self ) -> int:
return self.elements
def __repr__( self ) -> str:
return str(self.heap )
def __UpperCAmelCase ( self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_a = self.elements
self.elements += 1
self._bubble_up(__magic_name__ )
def __UpperCAmelCase ( self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_a , _a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_a , _a = self.heap[0]
self._bubble_down(__magic_name__ )
return elem
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Update the weight of the given key
_a = self.position_map[elem]
_a = (elem, weight)
if position > 0:
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
else:
self._bubble_down(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_a = self.position_map[elem]
if curr_pos == 0:
return None
_a = get_parent_position(__magic_name__ )
_a , _a = self.heap[curr_pos]
_a , _a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_up(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_a = self.position_map[elem]
_a , _a = self.heap[curr_pos]
_a = get_child_left_position(__magic_name__ )
_a = get_child_right_position(__magic_name__ )
if child_left_position < self.elements and child_right_position < self.elements:
_a , _a = self.heap[child_left_position]
_a , _a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
if child_left_position < self.elements:
_a , _a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
else:
return None
if child_right_position < self.elements:
_a , _a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__magic_name__ , __magic_name__ )
return self._bubble_down(__magic_name__ )
return None
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> None:
# Swap the nodes at the given positions
_a = self.heap[nodea_pos][0]
_a = self.heap[nodea_pos][0]
_a , _a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_a = nodea_pos
_a = nodea_pos
class a ( Generic[T] ):
def __init__( self ) -> None:
_a = {}
_a = 0
def __repr__( self ) -> str:
return str(self.connections )
def __len__( self ) -> int:
return self.nodes
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_a = {}
self.nodes += 1
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__magic_name__ )
self.add_node(__magic_name__ )
_a = weight
_a = weight
def _A (lowerCAmelCase__ :GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_a = {node: maxsize for node in graph.connections}
_a = {node: None for node in graph.connections}
_a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_a = priority_queue.extract_min()
_a = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
# running prim's algorithm
while not priority_queue.is_empty():
_a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
_a = node
return dist, parent
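# A minimal usage sketch (hedged): the obfuscated names above are a
# MinPriorityQueue (first class `a`), a GraphUndirectedWeighted (second class
# `a`), and a Prim's-style spanning-tree routine (the last `_A`). With readable
# aliases it would be exercised roughly like this; the alias names are
# illustrative, not from the source:
#
#   graph = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)
#   # `parent` maps each node to the predecessor through which it was reached.
#   # Note the relaxation uses dist[node] + weight, so `dist` accumulates path
#   # costs (Dijkstra-style) rather than single edge weights in this variant.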
| 104 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Any=13 , lowercase_ :str=7 , lowercase_ :Optional[int]=True , lowercase_ :Any=True , lowercase_ :Tuple=True , lowercase_ :Dict=True , lowercase_ :Optional[Any]=99 , lowercase_ :Optional[Any]=32 , lowercase_ :Dict=5 , lowercase_ :Dict=4 , lowercase_ :Tuple=37 , lowercase_ :Any="gelu" , lowercase_ :Any=0.1 , lowercase_ :Optional[int]=0.1 , lowercase_ :Optional[Any]=5_12 , lowercase_ :Union[str, Any]=16 , lowercase_ :int=2 , lowercase_ :int=0.0_2 , lowercase_ :Union[str, Any]=False , lowercase_ :Any=True , lowercase_ :List[str]="None" , lowercase_ :List[str]=3 , lowercase_ :str=4 , lowercase_ :List[str]=None , )-> Union[str, Any]:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self :Optional[int] )-> int:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase_ ( self :Any )-> str:
A__ = self.get_config()
A__ = 3_00
return config
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :List[Any] )-> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase_ ( self :Tuple , lowercase_ :List[str] , lowercase_ :List[str] , lowercase_ :Optional[Any] , lowercase_ :Dict , lowercase_ :Any , lowercase_ :Dict , lowercase_ :List[str] )-> Optional[int]:
A__ = DebertaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )[0]
A__ = model(lowercase_ , token_type_ids=lowercase_ )[0]
A__ = model(lowercase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase_ ( self :int , lowercase_ :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :List[str] , lowercase_ :List[Any] , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :List[str] )-> Union[str, Any]:
A__ = DebertaForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :Optional[Any] , lowercase_ :List[str] , lowercase_ :List[Any] , lowercase_ :int , lowercase_ :Union[str, Any] , lowercase_ :List[str] )-> Any:
A__ = self.num_labels
A__ = DebertaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase_ )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :int , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Tuple , lowercase_ :int )-> Any:
A__ = self.num_labels
A__ = DebertaForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self :str , lowercase_ :int , lowercase_ :int , lowercase_ :Union[str, Any] , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :Any , lowercase_ :str )-> Optional[Any]:
A__ = DebertaForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self :List[str] )-> Optional[Any]:
A__ = self.prepare_config_and_inputs()
(
A__,
A__,
A__,
A__,
A__,
A__,
A__,
) = config_and_inputs
A__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__lowercase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase = True
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
def UpperCAmelCase_ ( self :Dict )-> Union[str, Any]:
A__ = DebertaModelTester(self )
A__ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCAmelCase_ ( self :str )-> int:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :str )-> List[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase_ )
def UpperCAmelCase_ ( self :Any )-> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase_ )
def UpperCAmelCase_ ( self :Tuple )-> List[str]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase_ )
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DebertaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def UpperCAmelCase_ ( self :List[Any] )-> Optional[Any]:
pass
@slow
def UpperCAmelCase_ ( self :Dict )-> Any:
A__ = DebertaModel.from_pretrained("microsoft/deberta-base" )
A__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(lowercase_ , attention_mask=lowercase_ )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
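# Hedged note: in a transformers checkout these tests would typically run via
#   pytest tests/models/deberta/test_modeling_deberta.py
# with RUN_SLOW=1 set in the environment to activate the @slow integration
# test that downloads microsoft/deberta-base (the path is illustrative).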
| 237 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase ( UpperCamelCase__ ):
def __get__( self :Optional[int] , lowercase_ :Tuple , lowercase_ :Tuple=None )-> Optional[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
A__ = "__cached_" + self.fget.__name__
A__ = getattr(lowercase_ , lowercase_ , lowercase_ )
if cached is None:
A__ = self.fget(lowercase_ )
setattr(lowercase_ , lowercase_ , lowercase_ )
return cached
def UpperCamelCase ( _lowerCamelCase : Dict ):
A__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"invalid truth value {val!r}" )
def UpperCamelCase ( _lowerCamelCase : Any ):
if is_torch_fx_proxy(_lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCamelCase , np.ndarray )
def UpperCamelCase ( _lowerCamelCase : str ):
return isinstance(_lowerCamelCase , np.ndarray )
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] ):
return _is_numpy(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Dict ):
import torch
return isinstance(_lowerCamelCase , torch.Tensor )
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] ):
return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Any ):
import torch
return isinstance(_lowerCamelCase , torch.device )
def UpperCamelCase ( _lowerCamelCase : int ):
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Optional[Any] ):
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if hasattr(_lowerCamelCase , _lowerCamelCase ):
A__ = getattr(_lowerCamelCase , _lowerCamelCase )
else:
return False
return isinstance(_lowerCamelCase , torch.dtype )
def UpperCamelCase ( _lowerCamelCase : Any ):
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : List[Any] ):
import tensorflow as tf
return isinstance(_lowerCamelCase , tf.Tensor )
def UpperCamelCase ( _lowerCamelCase : List[str] ):
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(_lowerCamelCase )
return type(_lowerCamelCase ) == tf.Tensor
def UpperCamelCase ( _lowerCamelCase : str ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : str ):
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase , jnp.ndarray )
def UpperCamelCase ( _lowerCamelCase : Tuple ):
return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
if isinstance(_lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple) ):
return [to_py_obj(_lowerCamelCase ) for o in obj]
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase ).tolist()
elif isinstance(_lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def UpperCamelCase ( _lowerCamelCase : int ):
if isinstance(_lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple) ):
return np.array(_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase )
else:
return obj
class UpperCAmelCase ( UpperCamelCase__ ):
def UpperCAmelCase_ ( self :int )-> Any:
A__ = fields(self )
# Safety and consistency checks
if not len(lowercase_ ):
raise ValueError(F"{self.__class__.__name__} has no fields." )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"{self.__class__.__name__} should not have more than one required field." )
A__ = getattr(self , class_fields[0].name )
A__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowercase_ ):
if isinstance(lowercase_ , lowercase_ ):
A__ = first_field.items()
A__ = True
else:
try:
A__ = iter(lowercase_ )
A__ = True
except TypeError:
A__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowercase_ ):
if (
not isinstance(lowercase_ , (list, tuple) )
or not len(lowercase_ ) == 2
or not isinstance(element[0] , lowercase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
A__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
A__ = element[1]
elif first_field is not None:
A__ = first_field
else:
for field in class_fields:
A__ = getattr(self , field.name )
if v is not None:
A__ = v
def __delitem__( self :List[Any] , *lowercase_ :List[Any] , **lowercase_ :Optional[Any] )-> Union[str, Any]:
raise Exception(F"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
def UpperCAmelCase_ ( self :Tuple , *lowercase_ :int , **lowercase_ :int )-> Union[str, Any]:
raise Exception(F"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
def UpperCAmelCase_ ( self :List[Any] , *lowercase_ :Optional[int] , **lowercase_ :Tuple )-> List[Any]:
raise Exception(F"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
def UpperCAmelCase_ ( self :Dict , *lowercase_ :Optional[int] , **lowercase_ :Any )-> Any:
raise Exception(F"You cannot use ``update`` on a {self.__class__.__name__} instance." )
def __getitem__( self :Optional[Any] , lowercase_ :Optional[Any] )-> Any:
if isinstance(lowercase_ , lowercase_ ):
A__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :Union[str, Any] )-> Tuple:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowercase_ , lowercase_ )
super().__setattr__(lowercase_ , lowercase_ )
def __setitem__( self :Tuple , lowercase_ :Optional[int] , lowercase_ :Tuple )-> List[Any]:
# Will raise a KeyException if needed
super().__setitem__(lowercase_ , lowercase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
@classmethod
def UpperCAmelCase_ ( cls :Any , lowercase_ :int )-> List[str]:
raise ValueError(
F"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" )
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """longest"""
__lowercase = """max_length"""
__lowercase = """do_not_pad"""
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """pt"""
__lowercase = """tf"""
__lowercase = """np"""
__lowercase = """jax"""
class UpperCAmelCase :
def __init__( self :List[str] , lowercase_ :List[ContextManager] )-> str:
A__ = context_managers
A__ = ExitStack()
def __enter__( self :Dict )-> Any:
for context_manager in self.context_managers:
self.stack.enter_context(lowercase_ )
def __exit__( self :List[Any] , *lowercase_ :Optional[Any] , **lowercase_ :str )-> Union[str, Any]:
self.stack.__exit__(*lowercase_ , **lowercase_ )
def UpperCamelCase ( _lowerCamelCase : Dict ):
A__ = infer_framework(_lowerCamelCase )
if framework == "tf":
A__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A__ = inspect.signature(model_class.forward ) # PyTorch models
else:
A__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def UpperCamelCase ( _lowerCamelCase : List[str] ):
A__ = model_class.__name__
A__ = infer_framework(_lowerCamelCase )
if framework == "tf":
A__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A__ = inspect.signature(model_class.forward ) # PyTorch models
else:
A__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def UpperCamelCase ( _lowerCamelCase : MutableMapping , _lowerCamelCase : str = "" , _lowerCamelCase : str = "." ):
def _flatten_dict(_lowerCamelCase : List[Any] , _lowerCamelCase : int="" , _lowerCamelCase : Any="." ):
for k, v in d.items():
A__ = str(_lowerCamelCase ) + delimiter + str(_lowerCamelCase ) if parent_key else k
if v and isinstance(_lowerCamelCase , _lowerCamelCase ):
yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) )
@contextmanager
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : bool = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : Optional[Any]=None ):
if is_numpy_array(_lowerCamelCase ):
return np.transpose(_lowerCamelCase , axes=_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.T if axes is None else array.permute(*_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase )
else:
raise ValueError(F"Type not supported for transpose: {type(_lowerCamelCase )}." )
def UpperCamelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Any ):
if is_numpy_array(_lowerCamelCase ):
return np.reshape(_lowerCamelCase , _lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.reshape(*_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.reshape(_lowerCamelCase , _lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.reshape(_lowerCamelCase , _lowerCamelCase )
else:
raise ValueError(F"Type not supported for reshape: {type(_lowerCamelCase )}." )
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]=None ):
if is_numpy_array(_lowerCamelCase ):
return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase )
else:
raise ValueError(F"Type not supported for squeeze: {type(_lowerCamelCase )}." )
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict ):
if is_numpy_array(_lowerCamelCase ):
return np.expand_dims(_lowerCamelCase , _lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.unsqueeze(dim=_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase )
else:
raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." )
def UpperCamelCase ( _lowerCamelCase : List[str] ):
if is_numpy_array(_lowerCamelCase ):
return np.size(_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.numel()
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.size(_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return array.size
else:
raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." )
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
for key, value in auto_map.items():
if isinstance(_lowerCamelCase , (tuple, list) ):
A__ = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
A__ = F"{repo_id}--{value}"
return auto_map
def UpperCamelCase ( _lowerCamelCase : Dict ):
for base_class in inspect.getmro(_lowerCamelCase ):
A__ = base_class.__module__
A__ = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"Could not infer framework from class {model_class}." )
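# A minimal self-contained sketch (hedged) of the type-dispatch pattern used by
# the framework-agnostic helpers above (the repeated `UpperCamelCase` wrappers
# implement transpose, reshape, squeeze, expand_dims and tensor size). Only the
# numpy branch is shown so the snippet stays runnable; a real dispatcher would
# also branch on torch/tf/jax tensors. Names here are illustrative:
import numpy as np

def reshape_any(array, newshape):
    # numpy path; other backends would use array.reshape(*shape), tf.reshape,
    # or jnp.reshape, selected via the is_*_tensor probes defined above
    if isinstance(array, np.ndarray):
        return np.reshape(array, newshape)
    raise ValueError(f"Type not supported for reshape: {type(array)}.")

assert reshape_any(np.arange(6), (2, 3)).shape == (2, 3)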
| 237 | 1 |
import torch
from torch import nn
class lowercase__ ( nn.Module):
def __init__( self : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = n_token
SCREAMING_SNAKE_CASE : str = d_embed
SCREAMING_SNAKE_CASE : Dict = d_proj
SCREAMING_SNAKE_CASE : str = cutoffs + [n_token]
SCREAMING_SNAKE_CASE : List[str] = [0] + self.cutoffs
SCREAMING_SNAKE_CASE : Union[str, Any] = div_val
SCREAMING_SNAKE_CASE : Optional[int] = self.cutoffs[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList()
SCREAMING_SNAKE_CASE : int = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) )
else:
self.out_projs.append(UpperCamelCase__ )
self.out_layers.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : List[str] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) )
self.out_layers.append(nn.Linear(UpperCamelCase__ , r_idx - l_idx ) )
SCREAMING_SNAKE_CASE : List[str] = keep_order
def __A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if proj is None:
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
SCREAMING_SNAKE_CASE : Any = nn.functional.linear(UpperCamelCase__ , proj.t().contiguous() )
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def __A ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE : Any = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE : List[Any] = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE : Dict = hidden.view(-1 , hidden.size(-1 ) )
SCREAMING_SNAKE_CASE : str = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
SCREAMING_SNAKE_CASE : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE : Tuple = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = labels != -100
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE : Dict = (
-nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE : List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : int = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE : List[str] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE : str = self.out_layers[i].weight
SCREAMING_SNAKE_CASE : List[Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE : Dict = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE : str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE : Dict = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Optional[Any] = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
SCREAMING_SNAKE_CASE : int = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE : Tuple = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE : str = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE : List[str] = labels.index_select(0 , UpperCamelCase__ ) - l_idx
SCREAMING_SNAKE_CASE : str = head_logprob.index_select(0 , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = hidden.index_select(0 , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Any = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE : Any = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE : str = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE : Tuple = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE : List[Any] = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def __A ( self : Optional[Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE : Optional[int] = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE : str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE : str = self.out_layers[i].weight
SCREAMING_SNAKE_CASE : Union[str, Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE : Any = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
SCREAMING_SNAKE_CASE : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE : List[Any] = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE : Dict = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE : int = logprob_i
return out
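# Hedged worked example of the cutoff bookkeeping above: with n_token=10 and
# cutoffs=[4, 7], self.cutoffs becomes [4, 7, 10] and self.cutoff_ends
# [0, 4, 7, 10], i.e. a head over tokens 0-3 plus two tail clusters (4-6, 7-9).
# The head carries n_clusters=2 extra logits that route probability mass to
# the tails, so log p(token in tail i) = head cluster logprob + tail logprob.
# Numbers here are illustrative, not from the source.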
| 364 |
from __future__ import annotations
from math import pi
def A ( _lowercase , _lowercase , _lowercase ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
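# Hedged worked example: calling the function above (an obfuscated
# ind_reactance) with inductance=0, frequency=10_000, reactance=50 solves
# X_L = 2 * pi * f * L for L and returns
# {"inductance": 50 / (2 * pi * 10_000)}, roughly {"inductance": 7.96e-4}.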
| 258 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
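# Hedged note: when torch or transformers is missing, the dummy-objects import
# above substitutes placeholder classes for every ControlNet pipeline, so the
# names still import cleanly and only raise (via requires_backends) when
# actually instantiated or loaded with from_pretrained.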
| 253 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = split_dict._to_yaml_list()
assert len(snake_case__ ) == len(snake_case__ )
lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
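# Hedged note: the round-trip test above mirrors how `datasets` serializes
# split metadata into README YAML; _to_yaml_list drops the deprecated
# dataset_name field, which is why the test nulls it before comparing, while
# asdict deliberately keeps the field so older `datasets` versions can still
# reload dataset_infos.json.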
| 338 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self: Optional[int] ) -> Any:
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A )
return image
@property
def __A ( self: str ) -> Optional[Any]:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __A ( self: int ) -> Optional[Any]:
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __A ( self: int ) -> Union[str, Any]:
torch.manual_seed(0 )
_A = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__A )
@property
def __A ( self: Any ) -> List[Any]:
def extract(*__A: Union[str, Any] , **__A: Tuple ):
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Tuple ) -> int:
_A = torch.ones([0] )
def __A ( self: List[str] , __A: str ) -> Union[str, Any]:
self.pixel_values.to(__A )
return self
return Out()
return extract
def __A ( self: int ) -> Any:
_A = 'cpu' # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=__A )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_A = 77
_A = self.dummy_image.to(__A )
_A = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_A = AltDiffusionImgaImgPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
_A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__A )
_A = alt_pipe.to(__A )
alt_pipe.set_progress_bar_config(disable=__A )
_A = 'A painting of a squirrel eating a burger'
_A = torch.Generator(device=__A ).manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__A , )
_A = output.images
_A = torch.Generator(device=__A ).manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__A , return_dict=__A , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self: Optional[int] ) -> int:
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=__A )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_A = 77
_A = self.dummy_image.to(__A )
# put models in fp16
_A = unet.half()
_A = vae.half()
_A = bert.half()
# make sure here that pndm scheduler skips prk
_A = AltDiffusionImgaImgPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
_A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__A )
_A = alt_pipe.to(__A )
alt_pipe.set_progress_bar_config(disable=__A )
_A = 'A painting of a squirrel eating a burger'
_A = torch.manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=__A , num_inference_steps=2 , output_type='''np''' , image=__A , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self: Any ) -> List[str]:
_A = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
_A = init_image.resize((7_60, 5_04) )
_A = 'BAAI/AltDiffusion'
_A = AltDiffusionImgaImgPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_A = 'A fantasy landscape, trending on artstation'
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__A , image=__A , strength=0.75 , guidance_scale=7.5 , generator=__A , output_type='''np''' , )
_A = output.images[0]
_A = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_A = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Tuple ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self: Union[str, Any] ) -> List[Any]:
_A = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_A = init_image.resize((7_68, 5_12) )
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
_A = 'BAAI/AltDiffusion'
_A = AltDiffusionImgaImgPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
_A = 'A fantasy landscape, trending on artstation'
_A = torch.manual_seed(0 )
_A = pipe(
prompt=__A , image=__A , strength=0.75 , guidance_scale=7.5 , generator=__A , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
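# Hedged note: the full-image check above uses a max-absolute-error bound of
# 1e-2 instead of comparing a pixel slice because, as the comment says, img2img
# outputs drift slightly across GPUs even in fp32; a loose MAE bound absorbs
# that nondeterminism without masking real regressions.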
| 367 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "facebook/bart-large-mnli"
A_ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
A_ = "text_classifier"
A_ = AutoTokenizer
A_ = AutoModelForSequenceClassification
A_ = ["text", ["text"]]
A_ = ["text"]
def __A ( self: int ) -> str:
super().setup()
_A = self.model.config
_A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
_A = int(__A )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: List[str] ) -> int:
_A = labels
return self.pre_processor(
[text] * len(__A ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __A ( self: str , __A: List[Any] ) -> Union[str, Any]:
_A = outputs.logits
_A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
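# A minimal usage sketch (hedged; the class above is a text-classification
# PipelineTool whose name is obfuscated in this dump, and the call path follows
# the base-class convention of encode -> forward -> decode):
#
#   tool = TextClassificationTool()   # illustrative alias for the class above
#   result = tool("This movie was great", labels=["positive", "negative"])
#   # -> "positive", picked via the entailment logit of the bart-large-mnli head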
| 75 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def A_ ( A__ , A__ , A__ , A__ , A__ ) -> float:
a__ : Optional[Any] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(A__ )] )
a__ : Dict = np.array(A__ )
a__ : List[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , A__ ) ) , x.transpose() ) , A__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def A_ ( A__ , A__ , A__ ) -> float:
a__ : List[Any] = (1, 2, 1)
a__ : Union[str, Any] = (1, 1, 0, 7)
a__ : str = SARIMAX(
A__ , exog=A__ , order=A__ , seasonal_order=A__ )
a__ : List[str] = model.fit(disp=A__ , maxiter=600 , method='nm' )
a__ : List[Any] = model_fit.predict(1 , len(A__ ) , exog=[test_match] )
return result[0]
def A_ ( A__ , A__ , A__ ) -> float:
a__ : Optional[Any] = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(A__ , A__ )
a__ : Optional[int] = regressor.predict(A__ )
return y_pred[0]
def A_ ( A__ ) -> float:
train_user.sort()
a__ : Union[str, Any] = np.percentile(A__ , 25 )
a__ : Dict = np.percentile(A__ , 75 )
a__ : Union[str, Any] = qa - qa
a__ : str = qa - (iqr * 0.1)
return low_lim
def A_ ( A__ , A__ ) -> bool:
a__ : Optional[Any] = 0
a__ : List[Any] = 0
for i in list_vote:
if i > actual_result:
a__ : Tuple = not_safe + 1
else:
if abs(abs(A__ ) - abs(A__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase : Optional[int] = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
lowercase : List[str] = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
lowercase : str = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase : List[str] = normalize_df[:, 2].tolist()
lowercase : int = normalize_df[:, 0].tolist()
lowercase : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase : Optional[int] = normalize_df[:, [1, 2]].tolist()
lowercase : Optional[Any] = x[: len(x) - 1]
lowercase : Any = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase : Union[str, Any] = total_date[: len(total_date) - 1]
lowercase : Optional[Any] = total_user[: len(total_user) - 1]
lowercase : List[Any] = total_match[: len(total_match) - 1]
lowercase : List[Any] = total_date[len(total_date) - 1 :]
lowercase : Optional[Any] = total_user[len(total_user) - 1 :]
lowercase : Union[str, Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase : int = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase : Dict = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 99 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase : List[str] = """src/transformers"""
lowercase : Optional[int] = """docs/source/en/tasks"""
def A_ ( A__ , A__ , A__ ) -> Tuple:
with open(A__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ : Any = f.readlines()
# Find the start prompt.
a__ : str = 0
while not lines[start_index].startswith(A__ ):
start_index += 1
start_index += 1
a__ : int = start_index
while not lines[end_index].startswith(A__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowercase : Tuple = direct_transformers_import(TRANSFORMERS_PATH)
lowercase : Optional[Any] = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase : Optional[Any] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def A_ ( A__ ) -> Optional[int]:
a__ : Optional[Any] = TASK_GUIDE_TO_MODELS[task_guide]
a__ : int = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A__ , set() )
a__ : Optional[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def A_ ( A__ , A__=False ) -> Optional[int]:
a__ , a__ , a__ , a__ : Dict = _find_text_in_file(
filename=os.path.join(A__ , A__ ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
a__ : List[Any] = get_model_list_for_task(A__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(A__ , A__ ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
' to fix this.' )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase : str = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
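# Hedged note: per the comment at the top, run from the repo root as
#   python utils/check_task_guides.py
# to detect stale model lists in docs/source/en/tasks, or add
# --fix_and_overwrite to rewrite the generated blocks in place.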
| 99 | 1 |
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCAmelCase_ : List[Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Dict = True
_UpperCAmelCase : int = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
order.append(lowerCAmelCase_ )
return order
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Dict = True
_UpperCAmelCase : int = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return component
def __A ( lowerCAmelCase_ ):
_UpperCAmelCase : str = len(lowerCAmelCase_ ) * [False]
_UpperCAmelCase : dict[int, list[int]] = {vert: [] for vert in range(len(lowerCAmelCase_ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = []
for i, was_visited in enumerate(lowerCAmelCase_ ):
if not was_visited:
order += topology_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : int = []
_UpperCAmelCase : Union[str, Any] = len(lowerCAmelCase_ ) * [False]
for i in range(len(lowerCAmelCase_ ) ):
_UpperCAmelCase : Union[str, Any] = order[len(lowerCAmelCase_ ) - i - 1]
if not visited[vert]:
_UpperCAmelCase : List[Any] = find_components(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
components_list.append(lowerCAmelCase_ )
return components_list
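# Hedged worked example using the second module-level test graph above,
# {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}: the Kosaraju-style
# routine (the obfuscated `__A` functions are topology_sort, find_components
# and strongly_connected_components) finds the components {0, 1, 2} and
# {3, 4, 5}; the list and per-component ordering may differ.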
| 358 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = RobertaTokenizer
def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase__ ) != add_prefix_space:
_UpperCAmelCase : Tuple = getattr(lowerCAmelCase__ , pre_tok_state.pop("""type""" ) )
_UpperCAmelCase : Any = add_prefix_space
_UpperCAmelCase : List[Any] = pre_tok_class(**lowerCAmelCase__ )
_UpperCAmelCase : Dict = add_prefix_space
_UpperCAmelCase : int = """post_processor"""
_UpperCAmelCase : Any = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
if tokenizer_component_instance:
_UpperCAmelCase : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase : Any = tuple(state["""sep"""] )
if "cls" in state:
_UpperCAmelCase : Tuple = tuple(state["""cls"""] )
_UpperCAmelCase : Dict = False
if state.get("""add_prefix_space""" , lowerCAmelCase__ ) != add_prefix_space:
_UpperCAmelCase : List[str] = add_prefix_space
_UpperCAmelCase : Dict = True
if state.get("""trim_offsets""" , lowerCAmelCase__ ) != trim_offsets:
_UpperCAmelCase : Tuple = trim_offsets
_UpperCAmelCase : List[str] = True
if changes_to_apply:
_UpperCAmelCase : Dict = getattr(lowerCAmelCase__ , state.pop("""type""" ) )
_UpperCAmelCase : Optional[Any] = component_class(**lowerCAmelCase__ )
setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
@property
    def mask_token(self):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def snake_case_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
_UpperCAmelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = kwargs.get("""is_split_into_words""" , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
_UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=None ):
_UpperCAmelCase : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
_UpperCAmelCase : str = [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
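# Editor's usage sketch (not in the original file), using the class name
# restored above; the values shown are the standard roberta-base encoding:
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   tok("Hello world")["input_ids"]  # -> [0, 31414, 232, 2]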
| 170 | 0 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    # Classical fourth-order Runge-Kutta for y' = f(x, y) with y(x0) = y0,
    # taking fixed steps of size h until x_end.
    n = int(np.ceil((x_end - x0) / h ))
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x , y[k] )
        k2 = f(x + 0.5 * h , y[k] + 0.5 * h * k1 )
        k3 = f(x + 0.5 * h , y[k] + 0.5 * h * k2 )
        k4 = f(x + h , y[k] + h * k3 )
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
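# --- Editor's usage sketch (not in the original file) ---
# Integrate y' = y from x = 0 to x = 1 with the integrator restored above;
# the exact value at x = 1 is e = 2.718281828...
def _demo_runge_kutta() -> None:
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7182818, matching np.e to about 7 digits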
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
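# Editor's usage sketch (not in the original script):
#   python convert_m2m100_original_checkpoint_to_pytorch.py model.pt ./m2m100_pt
# (the script file name is illustrative; the two positional arguments match
# the argparse definitions above.)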
| 229 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""
    def __init__(self, data: Any) -> None:
        '''simple docstring'''
        self.data: Any = data
        self.next: Node | None = None
class CircularLinkedList:
    """simple docstring"""
    def __init__(self) -> None:
        '''simple docstring'''
        self.head: Node | None = None
        self.tail: Node | None = None
    def __iter__(self) -> Iterator[Any]:
        '''simple docstring'''
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        '''simple docstring'''
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        '''simple docstring'''
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        '''simple docstring'''
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        '''simple docstring'''
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        '''simple docstring'''
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        '''simple docstring'''
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        '''simple docstring'''
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        '''simple docstring'''
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 358 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 171 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 6 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Optional[int]=8 ):
'''simple docstring'''
__lowerCamelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowerCamelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
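# Editor's worked example (not in the original file): with scale_factor=8 a
# requested 768x768 canvas gives ceil(768 / 8**2) * 8 = 12 * 8 = 96, i.e. a
# 96x96 grid, while a non-multiple such as 500 rounds up: ceil(500 / 64) * 8 = 64.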
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(UpperCAmelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12 | 0 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """simple docstring"""
    # Apply the gaussian function elementwise over a matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """simple docstring"""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """simple docstring"""
    # Distance-from-centre matrix, pushed through the gaussian above.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter(img: np.ndarray, spatial_variance: float, intensity_variance: float, kernel_size: int, ) -> np.ndarray:
    """simple docstring"""
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args(args: list) -> tuple:
    """simple docstring"""
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size += abs(kernel_size % 2 - 1 )  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('input image', img)
    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('output image', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
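# Editor's usage sketch (not in the original file): run from a shell, e.g.
#   python bilateral_filter.py path/to/image.jpg 1.0 1.0 5
# (positional args: image path, spatial variance, intensity variance, kernel
# size; the script file name is illustrative.)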
| 356 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
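# Editor's usage sketch (not in the original script): fire exposes the function
# as a CLI and forwards extra flags as config overrides, e.g.
#   python save_randomly_initialized.py t5-small ./t5-small-random --d_model 64
# (the script file name is illustrative; t5-small is any config name on the Hub.)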
| 221 | 0 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector):
    return np.dot(vector , vector )
class A :
def __init__( self , *,
lowerCamelCase__ = np.inf , lowerCamelCase__ = "linear" , lowerCamelCase__ = 0.0 , ) -> None:
'''simple docstring'''
lowercase__ = regularization
lowercase__ = gamma
if kernel == "linear":
lowercase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
lowercase__ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowercase__ = F'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase__ )
    def __linear(self, vector1, vector2) -> float:
        '''simple docstring'''
        return np.dot(vector1 , vector2 )
    def __rbf(self, vector1, vector2) -> float:
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2 )) )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> None:
'''simple docstring'''
lowercase__ = observations
lowercase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowercase__) , ) = np.shape(lowerCamelCase__ )
def to_minimize(lowerCamelCase__ ) -> float:
lowercase__ = 0
((lowercase__) , ) = np.shape(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase__ )
lowercase__ = LinearConstraint(lowerCamelCase__ , 0 , 0 )
lowercase__ = Bounds(0 , self.regularization )
lowercase__ = minimize(
lowerCamelCase__ , np.ones(lowerCamelCase__ ) , bounds=lowerCamelCase__ , constraints=[ly_contraint] ).x
lowercase__ = l_star
# calculating mean offset of separation plane to points
lowercase__ = 0
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
lowercase__ = s / n
def A__ ( self , lowerCamelCase__ ) -> int:
'''simple docstring'''
lowercase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowerCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
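# --- Editor's usage sketch (not in the original file) ---
# Assuming the obfuscated class above is restored with readable names (say
# SVC, with fit(observations, classes) and predict(observation)), a linearly
# separable toy problem would look like:
#   xs = np.array([[0.0, 1.0], [1.0, 1.0], [1.0, 2.0], [2.0, 2.0]])
#   ys = np.array([1, 1, -1, -1])
#   svc = SVC(kernel="linear", regularization=10)
#   svc.fit(xs, ys)
#   svc.predict(np.array([0.0, 0.5]))  # -> 1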
| 164 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class A ( __UpperCAmelCase ):
def A__ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
        data_cached = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=True , )
        self.data_dir = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def A__ ( self ) -> str:
'''simple docstring'''
MarianMTModel.from_pretrained(lowerCamelCase__ )
@slow
@require_torch_gpu
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
lowercase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase__ = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase__ = ["""finetune.py"""] + bash_script.split() + args
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
lowercase__ = SummarizationModule.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
lowercase__ = parser.parse_args()
lowercase__ = main(lowerCamelCase__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics["""val"""][0]
lowercase__ = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(lowerCamelCase__ )
lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class A ( __UpperCAmelCase ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
lowercase__ = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
lowercase__ = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
lowercase__ = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = bash_script.replace("""--fp16""" , """""" )
lowercase__ = 6
lowercase__ = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
lowercase__ = SummarizationDistiller.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
lowercase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase__ = distill_main(lowerCamelCase__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics["""val"""][0]
lowercase__ = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(lowerCamelCase__ )
lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 164 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    '''simple docstring'''
    vae_state_dict = checkpoint
    new_checkpoint = {}
A__ = vae_state_dict['encoder.conv_in.weight']
A__ = vae_state_dict['encoder.conv_in.bias']
A__ = vae_state_dict['encoder.conv_out.weight']
A__ = vae_state_dict['encoder.conv_out.bias']
A__ = vae_state_dict['encoder.norm_out.weight']
A__ = vae_state_dict['encoder.norm_out.bias']
A__ = vae_state_dict['decoder.conv_in.weight']
A__ = vae_state_dict['decoder.conv_in.bias']
A__ = vae_state_dict['decoder.conv_out.weight']
A__ = vae_state_dict['decoder.conv_out.bias']
A__ = vae_state_dict['decoder.norm_out.weight']
A__ = vae_state_dict['decoder.norm_out.bias']
A__ = vae_state_dict['quant_conv.weight']
A__ = vae_state_dict['quant_conv.bias']
A__ = vae_state_dict['post_quant_conv.weight']
A__ = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
A__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
A__ = {
layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE__ )
}
# Retrieves the keys for the decoder up blocks only
A__ = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
A__ = {
layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE__ )
}
for i in range(SCREAMING_SNAKE_CASE__ ):
A__ = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
A__ = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.weight' )
A__ = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.bias' )
A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ )
A__ = {'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets'}
assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ )
A__ = [key for key in vae_state_dict if 'encoder.mid.block' in key]
A__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A__ = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ )
A__ = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ )
A__ = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
A__ = renew_vae_attention_paths(SCREAMING_SNAKE_CASE__ )
A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ )
conv_attn_to_linear(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
A__ = num_up_blocks - 1 - i
A__ = [
key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
]
if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
A__ = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.weight'
]
A__ = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.bias'
]
A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ )
A__ = {'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets'}
assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ )
A__ = [key for key in vae_state_dict if 'decoder.mid.block' in key]
A__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A__ = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
A__ = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE__ )
A__ = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ )
A__ = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
A__ = renew_vae_attention_paths(SCREAMING_SNAKE_CASE__ )
A__ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE__ )
conv_attn_to_linear(SCREAMING_SNAKE_CASE__ )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, dump_path: str):
    '''simple docstring'''
    # Only the VAE weights are converted; the SD v1 inference config supplies
    # the matching architecture hyper-parameters.
    r = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    original_config = OmegaConf.load(io.BytesIO(r.content ) )
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework='pt' , device='cpu' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to write the converted diffusers VAE to.")
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
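# Editor's usage sketch (not in the original script):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers
# (the script file name is illustrative; both flags match the argparse
# definitions above.)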
| 282 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    '''simple docstring'''
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    '''simple docstring'''
    if split_mlp_wi:
        wi_a = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_b = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_a, wi_b)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    '''simple docstring'''
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    '''simple docstring'''
    old = traverse_util.flatten_dict(variables['target'] )
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:' , split_mlp_wi )
    new = collections.OrderedDict()
# Shared embeddings.
A__ = old['token_embedder/embedding']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
A__ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'pre_attention_layer_norm' )
A__ , A__ , A__ , A__ = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'attention' )
A__ = layer_norm
A__ = k.T
A__ = o.T
A__ = q.T
A__ = v.T
# Block i, layer 1 (MLP).
A__ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'pre_mlp_layer_norm' )
A__ , A__ = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , SCREAMING_SNAKE_CASE__ )
A__ = layer_norm
if split_mlp_wi:
A__ = wi[0].T
A__ = wi[1].T
else:
A__ = wi.T
A__ = wo.T
A__ = old[
'encoder/relpos_bias/rel_embedding'
].T
A__ = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
A__ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_self_attention_layer_norm' )
A__ , A__ , A__ , A__ = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'self_attention' )
A__ = layer_norm
A__ = k.T
A__ = o.T
A__ = q.T
A__ = v.T
# Block i, layer 1 (Cross Attention).
A__ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_cross_attention_layer_norm' )
A__ , A__ , A__ , A__ = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'encoder_decoder_attention' )
A__ = layer_norm
A__ = k.T
A__ = o.T
A__ = q.T
A__ = v.T
# Block i, layer 2 (MLP).
A__ = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_mlp_layer_norm' )
A__ , A__ = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , SCREAMING_SNAKE_CASE__ )
A__ = layer_norm
if split_mlp_wi:
A__ = wi[0].T
A__ = wi[1].T
else:
A__ = wi.T
A__ = wo.T
A__ = old['decoder/decoder_norm/scale']
A__ = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
A__ = old['decoder/logits_dense/kernel'].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    '''simple docstring'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    '''simple docstring'''
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    '''simple docstring'''
    config = TaConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('Done' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
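# Editor's usage sketch (not in the original script):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file t5_config.json --pytorch_dump_path ./t5_pytorch
# (the script file name is illustrative; add --is_encoder_only for
# encoder-only checkpoints.)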
| 282 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
if len(A ) < MIN_NUM_TOKENS:
return None
    min_hash = MinHash(num_perm=NUM_PERM )
for token in set(A ):
min_hash.update(token.encode() )
return min_hash
def _SCREAMING_SNAKE_CASE (A ) -> Union[str, Any]:
"""simple docstring"""
return {t for t in NON_ALPHA.split(A ) if len(t.strip() ) > 0}
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : List[str] , *,
UpperCamelCase : float = 0.85 , ):
'''simple docstring'''
lowercase__ = duplication_jaccard_threshold
lowercase__ = NUM_PERM
lowercase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowercase__ = defaultdict(_lowerCAmelCase )
def UpperCamelCase__ (self : str , UpperCamelCase : Tuple , UpperCamelCase : MinHash ):
'''simple docstring'''
lowercase__ = self._index.query(_lowerCAmelCase )
if code_key in self._index.keys:
print(f"Duplicate key {code_key}" )
return
self._index.insert(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
lowercase__ = []
for base, duplicates in self._duplicate_clusters.items():
lowercase__ = [base] + list(_lowerCAmelCase )
# reformat the cluster to be a list of dict
lowercase__ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(_lowerCAmelCase )
return duplicate_clusters
def UpperCamelCase__ (self : Dict , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = self.get_duplicate_clusters()
with open(_lowerCAmelCase , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
lowercase__ = element
lowercase__ = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(A , max_queue_size=10_000 ) , chunksize=100 , ):
if data is not None:
yield data
def _SCREAMING_SNAKE_CASE (A , A ) -> int:
"""simple docstring"""
lowercase__ = DuplicationIndex(duplication_jaccard_threshold=A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(A ) ) , max_queue_size=100 ) ):
di.add(A , A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _SCREAMING_SNAKE_CASE (A , A ) -> Dict:
"""simple docstring"""
lowercase__ = get_tokens(A )
lowercase__ = get_tokens(A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_shared_dataset = None
def _SCREAMING_SNAKE_CASE (A , A ) -> Dict:
"""simple docstring"""
lowercase__ = []
for elementa in cluster:
lowercase__ = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
lowercase__ = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(A , A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowercase__ = 1
extremes.append(A )
return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
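

# Illustrative usage, not part of the original script: a minimal sketch of the
# end-to-end pipeline on a tiny in-memory Dataset. The column names follow the
# fields the helpers above read ("content", "repo_name", "path").
def _demo_deduplicate():
    ds = Dataset.from_dict(
        {
            "content": ["print(1)", "print(1)", "print(2)"],
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(len(ds_filter), duplicate_clusters)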
| 2 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase : Dict = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
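

# Illustrative check, not part of the original pipeline: with the default
# scale_factor of 8, pixel sizes map to latent sizes rounded up to cover the
# image, e.g. 768 // 64 = 12 and 12 * 8 = 96.
def _demo_downscale():
    assert downscale_height_and_width(768, 768) == (96, 96)
    assert downscale_height_and_width(100, 100) == (16, 16)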
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: List[str]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase :List[Any] = torch.device(F"cuda:{gpu_id}" )
lowercase :List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: Dict=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase :List[Any] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase :List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase , lowercase :Dict = cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
lowercase :Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self: int ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self: str , _lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase: int = 5_12 , _lowerCAmelCase: int = 5_12 , _lowerCAmelCase: int = 1_00 , _lowerCAmelCase: float = 4.0 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[torch.FloatTensor] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , ):
lowercase :str = self._execution_device
lowercase :List[str] = guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Any = torch.cat(_lowerCAmelCase , dim=0 )
lowercase :Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :List[Any] = torch.cat(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
lowercase :int = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
lowercase :Any = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
lowercase :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
lowercase :Optional[Any] = self.scheduler.timesteps
lowercase :Tuple = self.unet.config.in_channels
lowercase , lowercase :Optional[int] = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
lowercase :List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase :List[str] = {"image_embeds": image_embeds}
lowercase :List[Any] = self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
lowercase , lowercase :List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase , lowercase :Any = noise_pred.chunk(2 )
lowercase , lowercase :Dict = variance_pred.chunk(2 )
lowercase :str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase :Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase , lowercase :str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase :Tuple = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
lowercase :Dict = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase :Any = image * 0.5 + 0.5
lowercase :Tuple = image.clamp(0 , 1 )
lowercase :List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase :List[Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 236 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
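

# Illustrative usage, not part of the original module: `attribute_map` lets
# generic code read `hidden_size` while the config stores `embed_dim`.
def _demo_swin2sr_config():
    cfg = Swin2SRConfig(upscale=4)
    assert cfg.hidden_size == cfg.embed_dim == 180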
| 172 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursively take the better of skipping or taking the item at `index`."""
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
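

# Illustrative check, not part of the original module: with weights
# [1, 2, 4, 5], values [5, 4, 8, 6] and capacity 5, the best choice is
# items 0 and 2 (weight 1 + 4, value 5 + 8 = 13).
def _demo_knapsack():
    assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13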
if __name__ == "__main__":
import doctest
doctest.testmod()
| 172 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the right triangles with integer sides."""
    triplets = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter up to max_perimeter that admits the most triples."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
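

# Illustrative check, not part of the original module: this is Project Euler
# problem 39, whose answer for perimeters up to 1000 is 840.
def _demo_solution():
    assert solution(1000) == 840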
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 5 |
def actual_power(a: int, b: int):
    """Compute a**b by recursive squaring, O(log b) multiplications.

    For negative b, Python's `b % 2` and the truncation in `int(b / 2)` still
    drive the recursion to 0, yielding a**|b|.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
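

# Illustrative check, not part of the original module: negative exponents
# invert the result, e.g. power(-2, -3) = 1 / (-8) = -0.125.
def _demo_power():
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125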
if __name__ == "__main__":
print(power(-2, -3))
| 5 | 1 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Brick sort: alternately bubble-swap even- and odd-indexed pairs until no swap is needed."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
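

# Illustrative check, not part of the original module.
def _demo_odd_even_sort():
    assert odd_even_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]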
if __name__ == "__main__":
print("Enter list to be sorted")
lowercase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowercase__ = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 362 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 280 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Any = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name: str):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.')
    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
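

# Illustrative check, not part of the original script: the regex extracts the
# depth multiplier and input resolution from the checkpoint name.
def _demo_name_parsing():
    m = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
    assert (float(m.group(1)), int(m.group(2))) == (0.75, 192)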
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size},
        size={'shortest_edge': config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing to the hub...')
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Optional[Any] = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 60 |
"""simple docstring"""
def remove_digit(num: int) -> int:
    """Return the biggest value obtainable by removing one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int(''.join(list(transposition))) for transposition in num_transpositions)
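

# Illustrative check, not part of the original module: deleting one digit of
# 132 yields 32, 12 or 13, so the maximum is 32; the sign is discarded first.
def _demo_remove_digit():
    assert remove_digit(132) == 32
    assert remove_digit(-14) == 4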
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 60 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _lowerCAmelCase :
__UpperCAmelCase : Any = BlenderbotSmallConfig
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = '''gelu'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=20 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=0 , ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = parent
snake_case : List[Any] = batch_size
snake_case : Tuple = seq_length
snake_case : str = is_training
snake_case : int = use_labels
snake_case : Tuple = vocab_size
snake_case : Any = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : str = num_attention_heads
snake_case : int = intermediate_size
snake_case : str = hidden_dropout_prob
snake_case : Union[str, Any] = attention_probs_dropout_prob
snake_case : int = max_position_embeddings
snake_case : str = eos_token_id
snake_case : Dict = pad_token_id
snake_case : Any = bos_token_id
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case : str = prepare_blenderbot_small_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, inputs_dict
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
snake_case : Any = TFBlenderbotSmallModel(config=UpperCamelCase__ ).get_decoder()
snake_case : Tuple = inputs_dict["input_ids"]
snake_case : Optional[Any] = input_ids[:1, :]
snake_case : Tuple = inputs_dict["attention_mask"][:1, :]
snake_case : List[str] = inputs_dict["head_mask"]
snake_case : Tuple = 1
# first forward pass
snake_case : str = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
snake_case ,snake_case : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
snake_case : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case : List[str] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
snake_case : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
snake_case : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _lowerCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCAmelCase : Optional[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : Optional[int] = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Tuple = False
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : str = TFBlenderbotSmallModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_tokenizers
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
__UpperCAmelCase : List[str] = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
__UpperCAmelCase : Tuple = '''facebook/blenderbot_small-90M'''
@cached_property
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = self.tokenizer(self.src_text , return_tensors="tf" )
snake_case : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
snake_case : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 112 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Tuple = '''altclip_text_model'''
def __init__( self , UpperCamelCase__=25_0002 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=514 , UpperCamelCase__=1 , UpperCamelCase__=0.02 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-05 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=768 , **UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
snake_case : Any = vocab_size
snake_case : List[Any] = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Dict = hidden_act
snake_case : Dict = intermediate_size
snake_case : int = hidden_dropout_prob
snake_case : Optional[int] = attention_probs_dropout_prob
snake_case : Union[str, Any] = max_position_embeddings
snake_case : Optional[int] = type_vocab_size
snake_case : Dict = initializer_range
snake_case : int = initializer_factor
snake_case : Union[str, Any] = layer_norm_eps
snake_case : List[Any] = position_embedding_type
snake_case : Any = use_cache
snake_case : str = project_dim
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Tuple = '''altclip_vision_model'''
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=512 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3 , UpperCamelCase__=224 , UpperCamelCase__=32 , UpperCamelCase__="quick_gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1.0 , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
snake_case : Optional[int] = hidden_size
snake_case : str = intermediate_size
snake_case : List[str] = projection_dim
snake_case : Optional[Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : str = num_channels
snake_case : List[str] = patch_size
snake_case : List[Any] = image_size
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[Any] = initializer_factor
snake_case : Any = attention_dropout
snake_case : Dict = layer_norm_eps
snake_case : List[str] = hidden_act
@classmethod
def lowerCamelCase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__ )
snake_case ,snake_case : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
snake_case : Optional[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : str = '''altclip'''
__UpperCAmelCase : Optional[Any] = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=2.6592 , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case : List[str] = kwargs.pop("text_config_dict" , UpperCamelCase__ )
snake_case : Union[str, Any] = kwargs.pop("vision_config_dict" , UpperCamelCase__ )
super().__init__(**UpperCamelCase__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
snake_case : List[str] = {}
# This is the complete result when using `text_config_dict`.
snake_case : Dict = AltCLIPTextConfig(**UpperCamelCase__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
snake_case : Optional[Any] = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
snake_case : Any = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(UpperCamelCase__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
snake_case : Union[str, Any] = {}
# This is the complete result when using `vision_config_dict`.
snake_case : int = AltCLIPVisionConfig(**UpperCamelCase__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
snake_case : Optional[int] = {
str(UpperCamelCase__ ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
snake_case : int = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
snake_case : Optional[Any] = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(UpperCamelCase__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
snake_case : Optional[int] = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
snake_case : Dict = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
snake_case : Dict = AltCLIPTextConfig(**UpperCamelCase__ )
snake_case : Tuple = AltCLIPVisionConfig(**UpperCamelCase__ )
snake_case : int = projection_dim
snake_case : List[str] = logit_scale_init_value
snake_case : int = 1.0
@classmethod
def lowerCamelCase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Tuple = copy.deepcopy(self.__dict__ )
snake_case : Optional[int] = self.text_config.to_dict()
snake_case : str = self.vision_config.to_dict()
snake_case : Optional[int] = self.__class__.model_type
return output
| 112 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_pos, max_pos):
    return (
        clamp(rect[0], min_pos[0], max_pos[0]),
        clamp(rect[1], min_pos[1], max_pos[1]),
        clamp(rect[2], min_pos[0], max_pos[0]),
        clamp(rect[3], min_pos[1], max_pos[1]),
    )
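

# Illustrative check, not part of the original pipeline: clamp_rect keeps every
# corner of a rectangle inside the image bounds.
def _demo_clamp_rect():
    assert clamp(15, 0, 10) == 10
    assert clamp_rect((-5, -5, 20, 20), [0, 0], [10, 10]) == (0, 0, 10, 10)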
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0))
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def round_down_to_multiple(n, d):
    divisor = n % d
    return n - divisor
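

# Illustrative check, not part of the original pipeline: the helper rounds n
# down to the nearest multiple of d (its name is reconstructed; the original
# identifier was obfuscated).
def _demo_round_down():
    assert round_down_to_multiple(17, 5) == 15
    assert round_down_to_multiple(20, 5) == 20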
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetaDConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
        )
def __A ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
SCREAMING_SNAKE_CASE : Dict = add_overlap_rect(UpperCamelCase__ , UpperCamelCase__ , image.size )
SCREAMING_SNAKE_CASE : Optional[int] = image.crop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
SCREAMING_SNAKE_CASE : Optional[int] = translated_slice_x - (original_image_slice / 2)
SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = squeeze_tile(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = to_input.size
SCREAMING_SNAKE_CASE : Dict = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
SCREAMING_SNAKE_CASE : List[str] = super(UpperCamelCase__ , self ).__call__(image=UpperCamelCase__ , **UpperCamelCase__ ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE : Dict = unsqueeze_tile(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE : Optional[Any] = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCamelCase__ ) , mode='''L''' , )
final_image.paste(
UpperCamelCase__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCamelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , UpperCamelCase__ : Union[str, List[str]] , UpperCamelCase__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase__ : int = 75 , UpperCamelCase__ : float = 9.0 , UpperCamelCase__ : int = 50 , UpperCamelCase__ : Optional[Union[str, List[str]]] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 32 , UpperCamelCase__ : int = 32 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
SCREAMING_SNAKE_CASE : Optional[int] = math.ceil(image.size[0] / tile_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = math.ceil(image.size[1] / tile_size )
SCREAMING_SNAKE_CASE : str = tcx * tcy
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for y in range(UpperCamelCase__ ):
for x in range(UpperCamelCase__ ):
self._process_tile(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prompt=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , noise_level=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def main():
    # Run a demo
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj['image'].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')
if __name__ == "__main__":
main()
| 182 | import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase__ ( unittest.TestCase):
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : str = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE : Dict = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : List[str] = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE : str = CLIPImageProcessor(**UpperCamelCase__ )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
config.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[int] = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __A ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(UpperCamelCase__ , revision='''aaaaaa''' )
def __A ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __A ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCamelCase__ ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE : Any = Path(UpperCamelCase__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCamelCase__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCamelCase__ , '''w''' ) )
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self : Any ):
'''simple docstring'''
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoImageProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCamelCase__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 182 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the TPU launcher arguments plus the remaining training-script arguments."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 355 |
'''Fetch the live price for a stock symbol from Yahoo Finance.'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    # Yahoo renders the quote with atomic-CSS class names; this selector is brittle
    # and may break whenever the page layout changes.
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 55 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase( _a, unittest.TestCase ):
lowercase_ : Any = KandinskyImgaImgPipeline
lowercase_ : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
lowercase_ : Any = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
lowercase_ : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ : Union[str, Any] = False
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return 1_00
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
return tokenizer
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=10_05, )
_lowercase : Optional[int] = MultilingualCLIP(lowerCamelCase)
_lowercase : List[str] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Union[str, Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowercase : Optional[Any] = UNetaDConditionModel(**lowerCamelCase)
return model
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Dict = VQModel(**self.dummy_movq_kwargs)
return model
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.dummy_text_encoder
_lowercase : List[Any] = self.dummy_tokenizer
_lowercase : int = self.dummy_unet
_lowercase : int = self.dummy_movq
_lowercase : Optional[int] = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_lowercase : List[Any] = DDIMScheduler(**lowerCamelCase)
_lowercase : List[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Dict:
"""simple docstring"""
_lowercase : List[str] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(lowerCamelCase)
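        # the pipeline also needs a negative image embedding for the classifier-free guidance branch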
# create init_image
_lowercase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[int] = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : Tuple = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB').resize((2_56, 2_56))
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[str] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Optional[Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Tuple = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = 'cpu'
_lowercase : Tuple = self.get_dummy_components()
_lowercase : str = self.pipeline_class(**lowerCamelCase)
_lowercase : str = pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = pipe(**self.get_dummy_inputs(lowerCamelCase))
_lowercase : Optional[int] = output.images
_lowercase : List[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase), return_dict=lowerCamelCase, )[0]
_lowercase : List[str] = image[0, -3:, -3:, -1]
_lowercase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : Tuple = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy')
_lowercase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
_lowercase : Optional[int] = 'A red cartoon frog, 4k'
_lowercase : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa)
pipe_prior.to(lowerCamelCase)
_lowercase : Optional[Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1', torch_dtype=torch.floataa)
_lowercase : List[Any] = pipeline.to(lowerCamelCase)
pipeline.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = torch.Generator(device='cpu').manual_seed(0)
_lowercase , _lowercase : List[Any] = pipe_prior(
lowerCamelCase, generator=lowerCamelCase, num_inference_steps=5, negative_prompt='', ).to_tuple()
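        # the prior stage maps the text prompt to CLIP image embeddings (plus a negative set)
        # that condition the decoder pipeline below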
_lowercase : Union[str, Any] = pipeline(
lowerCamelCase, image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='np', )
_lowercase : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
| 21 |
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into four equally sized quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's method."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven recursive products replace the eight multiplications
    # of the naive block algorithm.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible dimensions, zero-padding up to the next power of two."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Copy rows so zero-padding does not mutate the caller's matrices.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 305 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = checkpoint
lowercase__ = {}
lowercase__ = vae_state_dict["encoder.conv_in.weight"]
lowercase__ = vae_state_dict["encoder.conv_in.bias"]
lowercase__ = vae_state_dict["encoder.conv_out.weight"]
lowercase__ = vae_state_dict["encoder.conv_out.bias"]
lowercase__ = vae_state_dict["encoder.norm_out.weight"]
lowercase__ = vae_state_dict["encoder.norm_out.bias"]
lowercase__ = vae_state_dict["decoder.conv_in.weight"]
lowercase__ = vae_state_dict["decoder.conv_in.bias"]
lowercase__ = vae_state_dict["decoder.conv_out.weight"]
lowercase__ = vae_state_dict["decoder.conv_out.bias"]
lowercase__ = vae_state_dict["decoder.norm_out.weight"]
lowercase__ = vae_state_dict["decoder.norm_out.bias"]
lowercase__ = vae_state_dict["quant_conv.weight"]
lowercase__ = vae_state_dict["quant_conv.bias"]
lowercase__ = vae_state_dict["post_quant_conv.weight"]
lowercase__ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowercase__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowercase__ = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__magic_name__ )
}
# Retrieves the keys for the decoder up blocks only
lowercase__ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowercase__ = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__magic_name__ )
}
for i in range(__magic_name__ ):
lowercase__ = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
lowercase__ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
lowercase__ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
lowercase__ = renew_vae_resnet_paths(__magic_name__ )
lowercase__ = {"old": f'''down.{i}.block''', "new": f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
lowercase__ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowercase__ = 2
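    # the LDM VAE mid block is always ResNet -> attention -> ResNet, i.e. exactly two resnets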
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
lowercase__ = renew_vae_resnet_paths(__magic_name__ )
lowercase__ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
lowercase__ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowercase__ = renew_vae_attention_paths(__magic_name__ )
lowercase__ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
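    # the LDM checkpoint stores attention projections as 1x1 convolutions;
    # reshape them into the linear weights that diffusers attention blocks expect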
conv_attn_to_linear(__magic_name__ )
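    # LDM indexes decoder up blocks by resolution level while diffusers indexes them
    # by execution order, hence the reversed block_id mapping below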
for i in range(__magic_name__ ):
lowercase__ = num_up_blocks - 1 - i
lowercase__ = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
lowercase__ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
lowercase__ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
lowercase__ = renew_vae_resnet_paths(__magic_name__ )
lowercase__ = {"old": f'''up.{block_id}.block''', "new": f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
lowercase__ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowercase__ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowercase__ = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
lowercase__ = renew_vae_resnet_paths(__magic_name__ )
lowercase__ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
lowercase__ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowercase__ = renew_vae_attention_paths(__magic_name__ )
lowercase__ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
conv_attn_to_linear(__magic_name__ )
return new_checkpoint
def _A ( __magic_name__ , __magic_name__ , ):
# Only support V1
lowercase__ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowercase__ = io.BytesIO(r.content )
lowercase__ = OmegaConf.load(__magic_name__ )
lowercase__ = 512
lowercase__ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowercase__ = {}
with safe_open(__magic_name__ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
lowercase__ = f.get_tensor(__magic_name__ )
else:
lowercase__ = torch.load(__magic_name__ , map_location=__magic_name__ )["state_dict"]
# Convert the VAE model.
lowercase__ = create_vae_diffusers_config(__magic_name__ , image_size=__magic_name__ )
lowercase__ = custom_convert_ldm_vae_checkpoint(__magic_name__ , __magic_name__ )
lowercase__ = AutoencoderKL(**__magic_name__ )
vae.load_state_dict(__magic_name__ )
vae.save_pretrained(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_snake_case = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 201 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _A ( __magic_name__ ):
lowercase__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__lowerCamelCase = StableDiffusionLatentUpscalePipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowerCamelCase = frozenset([] )
__lowerCamelCase = True
@property
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = 1
lowercase__ = 4
lowercase__ = (16, 16)
lowercase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=_lowercase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=_lowercase , only_cross_attention=_lowercase , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
lowercase__ = EulerDiscreteScheduler(prediction_type="sample" )
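        # prediction_type="sample" means the model output is treated as the denoised
        # sample itself rather than as predicted noise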
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
lowercase__ = CLIPTextModel(_lowercase )
lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase ( self :Dict , _lowercase :Union[str, Any] , _lowercase :int=0 ):
'''simple docstring'''
if str(_lowercase ).startswith("mps" ):
lowercase__ = torch.manual_seed(_lowercase )
else:
lowercase__ = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "cpu"
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = pipe(**_lowercase ).images
lowercase__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
lowercase__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowercase , 1e-3 )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_lowercase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ = self.get_dummy_inputs(_lowercase )
lowercase__ = 2
lowercase__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
lowercase__ = getattr(_lowercase , scheduler_enum.name )
lowercase__ = scheduler_cls.from_config(pipe.scheduler.config )
lowercase__ = pipe(**_lowercase )[0]
outputs.append(_lowercase )
assert check_same_shape(_lowercase )
@require_torch_gpu
@slow
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = torch.manual_seed(33 )
lowercase__ = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
lowercase__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
lowercase__ = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
lowercase__ = pipe(_lowercase , generator=_lowercase , output_type="latent" ).images
lowercase__ = upscaler(
prompt=_lowercase , image=_lowercase , num_inference_steps=20 , guidance_scale=0 , generator=_lowercase , output_type="np" , ).images[0]
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = torch.manual_seed(33 )
lowercase__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
lowercase__ = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
lowercase__ = upscaler(
prompt=_lowercase , image=_lowercase , num_inference_steps=20 , guidance_scale=0 , generator=_lowercase , output_type="np" , ).images[0]
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 201 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['image_processor', 'tokenizer']
lowerCamelCase__ : str = 'CLIPImageProcessor'
lowerCamelCase__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self, lowercase_=None, lowercase_=None, **lowercase_ ) -> int:
"""simple docstring"""
a__ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowercase_, )
a__ =kwargs.pop('''feature_extractor''' )
a__ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_, lowercase_ )
def __call__( self, lowercase_=None, lowercase_=None, lowercase_=None, **lowercase_ ) -> int:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
a__ =self.tokenizer(lowercase_, return_tensors=lowercase_, **lowercase_ )
if images is not None:
a__ =self.image_processor(lowercase_, return_tensors=lowercase_, **lowercase_ )
if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ), tensor_type=lowercase_ )
def _UpperCAmelCase ( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase_, **lowercase_ )
def _UpperCAmelCase ( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowercase_, **lowercase_ )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =self.tokenizer.model_input_names
a__ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 188 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = IFInpaintingSuperResolutionPipeline
lowerCamelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCamelCase__ : str = PipelineTesterMixin.required_optional_params - {'latents'}
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def _UpperCAmelCase ( self, lowercase_, lowercase_=0 ) -> Tuple:
"""simple docstring"""
if str(lowercase_ ).startswith('''mps''' ):
a__ =torch.manual_seed(lowercase_ )
else:
a__ =torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
a__ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowercase_ ) ).to(lowercase_ )
a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ )
a__ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_ ) ).to(lowercase_ )
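        # the 16x16 tensor stands in for the stage-1 output; the original image and
        # mask stay at the full 32x32 resolution the superresolution stage targets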
a__ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_local()
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2, )
| 188 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __lowerCAmelCase ) -> bool:
SCREAMING_SNAKE_CASE__ : str = str(__lowerCAmelCase )
return len(__lowerCAmelCase ) == 9 and set(__lowerCAmelCase ) == set("""123456789""" )
def _lowercase ( ) -> int | None:
for base_num in range(9999 , 4999 , -1 ):
SCREAMING_SNAKE_CASE__ : Optional[int] = 10_0002 * base_num
if is_9_pandigital(__lowerCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
SCREAMING_SNAKE_CASE__ : List[str] = 100_2003 * base_num
if is_9_pandigital(__lowerCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'{solution() = }')
| 56 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
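# Import-time placeholder: any use of this class without `transformers`, `torch`
# and `note_seq` installed raises an informative error naming the missing backends.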
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self , *_a , **_a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> int:
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 56 | 1 |