import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
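# --- Extra usage sketch (added, not in the original): a few hand-picked inputs.
# The expected outcomes are my reading of the regex (prefix 0 / 94 / +94 / 0094,
# then 7x with x in {0,1,2,4,5,6,7,8}, an optional "-"/" " separator, 7 digits).
if __name__ == "__main__":
    for candidate in ("+94773283048", "0721234567", "075-1234567", "0912345678"):
        # first three should print True, the last one False (second digit is not 7)
        print(candidate, "->", is_sri_lankan_phone_number(candidate))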
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save/load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                # the renamed target key is a reconstruction: swap the first path component
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
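# --- Usage sketch (added; the script filename is a hypothetical placeholder):
#
#   python change_unet_config.py --repo_path ./my-unet-repo --dump_path ./my-unet-repo
#
# The module-level booleans above select which of the three passes runs:
# config cleanup, config-key renaming, or weight-key rewriting.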
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Return the first member of the run of n integers (the Project Euler 47 answer)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
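# --- Sanity checks (added): these values are the examples given in the
# Project Euler 47 problem statement (14 = 2*7, 15 = 3*5; 644 = 2^2*7*23, ...).
if __name__ == "__main__":
    assert run(2) == [14, 15]
    assert run(3) == [644, 645, 646]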
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__ : Any = logging.get_logger(__name__)
__magic_name__ : int = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : str = """convnextv2"""
def __init__( self , _A=3 , _A=4 , _A=4 , _A=None , _A=None , _A="gelu" , _A=0.02 , _A=1e-1_2 , _A=0.0 , _A=2_2_4 , _A=None , _A=None , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCamelCase : Optional[Any] = num_channels
UpperCamelCase : int = patch_size
UpperCamelCase : Dict = num_stages
UpperCamelCase : Tuple = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
UpperCamelCase : str = [3, 3, 9, 3] if depths is None else depths
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : Any = drop_path_rate
UpperCamelCase : Any = image_size
UpperCamelCase : Union[str, Any] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCamelCase , UpperCamelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=_A , out_indices=_A , stage_names=self.stage_names )
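# --- Usage sketch (added; assumes an installed `transformers` with ConvNeXT V2 support):
#
#   from transformers import ConvNextV2Config
#
#   config = ConvNextV2Config(out_features=["stage1", "stage4"])
#   print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   print(config.out_features)  # ['stage1', 'stage4'], resolved by the backbone mixin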
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase_ ( self : int ) -> int:
SCREAMING_SNAKE_CASE__ = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
SCREAMING_SNAKE_CASE__ = BertTokenizer.from_pretrained("bert-base-uncased" )
SCREAMING_SNAKE_CASE__ = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE__ = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
SCREAMING_SNAKE_CASE__ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
SCREAMING_SNAKE_CASE__ = train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE__ = val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE__ = 4
def _map_to_encoder_decoder_inputs(_snake_case : Tuple ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE__ = tokenizer(batch["article"] , padding="max_length" , truncation=_snake_case , max_length=512 )
SCREAMING_SNAKE_CASE__ = tokenizer(batch["highlights"] , padding="max_length" , truncation=_snake_case , max_length=128 )
SCREAMING_SNAKE_CASE__ = inputs.input_ids
SCREAMING_SNAKE_CASE__ = inputs.attention_mask
SCREAMING_SNAKE_CASE__ = outputs.input_ids
SCREAMING_SNAKE_CASE__ = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE__ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
SCREAMING_SNAKE_CASE__ = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case : List[Any] ):
SCREAMING_SNAKE_CASE__ = pred.label_ids
SCREAMING_SNAKE_CASE__ = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
SCREAMING_SNAKE_CASE__ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE__ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
SCREAMING_SNAKE_CASE__ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
SCREAMING_SNAKE_CASE__ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy="steps" , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE__ = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
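# --- Added usage sketch: the scraper depends on Indeed's live HTML, so it may
# yield nothing if the markup changes; `islice` caps how much of the page is consumed.
if __name__ == "__main__":
    from itertools import islice

    for title, company in islice(fetch_jobs("Bangalore"), 5):
        print(f"{title} @ {company}")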
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
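# --- Note (added): with the registration above, `import transformers` stays cheap;
# the torch-dependent `modeling_autoformer` module is imported only on first
# attribute access, e.g.:
#
#   from transformers import AutoformerConfig  # config only, no torch needed
#   from transformers import AutoformerModel   # resolves the lazy module, needs torch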
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluate the functional correctness of a program string by executing it in a subprocess."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disable various destructive functions so the generated code cannot interfere
    with the test (fork bombs, killing processes, deleting files, ...).
    WARNING: this is not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
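# --- Usage sketch (added; not part of the original module). The program string
# and ids below are made up for illustration.
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))
    # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}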
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
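# --- Usage sketch (added; local paths are hypothetical):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-finetuned-ade-512-512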
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration class for an MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : Union[str, Any] = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from datetime import datetime
import requests
def SCREAMING_SNAKE_CASE ( lowerCamelCase_):
a__ = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
a__ = requests.get(base_url + url).json()[0]['''urls'''][0]['''src''']
return requests.get(lowerCamelCase_).content
if __name__ == "__main__":
__a : List[str] = input('Enter Video/IGTV url: ').strip()
__a : List[Any] = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : List[str] = logging.get_logger(__name__)
__a : int = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
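        # The decoder_* fields configure the lightweight decoder used only during
        # MAE pre-training; mask_ratio=0.75 masks three quarters of the patches,
        # as in the original MAE recipe.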
| 200 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count the cuboids whose two shorter sides sum to `sum_shortest_sides`
                # and whose longest side is `max_cuboid_size`.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
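    # Project Euler 86: find the least cuboid size M such that the number of
    # cuboids with an integer shortest surface path exceeds `limit`; the problem
    # statement notes 2060 such cuboids for M = 100.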
| 103 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 304 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Returns (base ** exponent) % modulo_value via recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result
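# Project Euler 188: hyperexponentiation (tetration) of 1777 by 1855, keeping
# only the last `digits` digits at every step so intermediate numbers stay small.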
if __name__ == "__main__":
    print(F"{solution() = }")
| 304 | 1 |
'''simple docstring'''
import numpy as np

SQUARE = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) indexes of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) position in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        # First pass: write each letter's (row, column) pair into a 2 x n grid.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Second pass: read the grid row by row and pair up the digits again.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')

        # Invert the encode transposition: flatten the pairs, then read columns.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
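# Minimal round-trip sketch (assumed behavior of the class above):
#   cipher = BifidCipher()
#   cipher.decode(cipher.encode("testmessage")) == "testmessage"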
| 688 |
'''simple docstring'''
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--repo_path',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    config_parameters_to_change = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }

    key_parameters_to_change = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }

    subfolder = '' if has_file(args.repo_path, 'config.json') else 'unet'

    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, 'config.json'):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    # Re-root the parameter under its renamed top-level module.
                    new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 688 | 1 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
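# Example: fizz_buzz(1, 7) -> "1 2 Fizz 4 Buzz Fizz 7 "
# (a space is appended after every term, including the last one).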
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'X': datasets.Sequence(datasets.Value('float', id='sequence'), id='X'),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('Expected `X` to be a 2D vector')
        if len(reference_distribution.shape) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension')

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse when the covariance is singular.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
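        # Note: only `X` is declared in `features`; `reference_distribution` is
        # passed straight to `compute()`, mirroring the upstream metric's behavior.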
| 490 | 0 |
'''simple docstring'''
def factorial(digit: int) -> int:
    """Return digit! (recursively)."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """Return True if `number` equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print('Program to check whether a number is a Krishnamurthy Number or not.')
    number = int(input('Enter number: ').strip())
    print(
        F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
    )
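# Example: 145 is a Krishnamurthy number, since 1! + 4! + 5! = 1 + 24 + 120 = 145.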
| 215 |
'''simple docstring'''
import argparse
import os
import re

PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into indented blocks, starting after `start_prompt` and stopping at `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    "Wraps a `key` (that maps an object to string) to lower case and remove underscores."

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    "Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
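    # Typical usage (assumed entry point): run with --check_only in CI to fail on
    # unsorted `_import_structure` blocks, or without the flag to rewrite the
    # __init__.py files under src/diffusers in place.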
| 215 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
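        # `hidden_size` and the stage bookkeeping above are what let this config act
        # as a backbone config: out_features/out_indices select which stages are
        # exposed to downstream heads.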
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 662 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set to a truthy value."
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    "Decorator marking a test that must run on CPU only."
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    "Decorator marking a test that requires CUDA."
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    "Decorator marking a test that requires an XPU."
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    "Decorator marking a test that requires the torch `mps` backend."
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    "Decorator marking a test that requires transformers and datasets."
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    "Decorator marking a test that requires bitsandbytes."
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    "Decorator marking a test that requires TPUs."
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    "Decorator marking a test that requires exactly one GPU."
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    "Decorator marking a test that requires exactly one XPU."
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    "Decorator marking a test that requires a multi-GPU setup."
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    "Decorator marking a test that requires a multi-XPU setup."
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    "Decorator marking a test that requires safetensors."
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    "Decorator marking a test that requires DeepSpeed."
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    "Decorator marking a test that requires FSDP support (torch >= 1.12.0)."
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """
    Decorator marking that a test requires a particular torch version to be tested. Skipped when the installed torch
    version is less than the required one.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    "Decorator marking a test that requires tensorboard."
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    "Decorator marking a test that requires wandb."
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    "Decorator marking a test that requires comet_ml."
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    "Decorator marking a test that requires at least one tracker (and no comet_ml)."
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    """
    Keeps a single temporary directory open for the duration of the class, wipes its contents on setUp (if
    `clear_on_setup` is True) and deletes it on tearDownClass.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns the decoded stdout; wraps failures in
    `SubprocessCallException`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
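# Illustrative calls (hypothetical commands): run_command(["echo", "hello"],
# return_stdout=True) returns the captured stdout, while execute_subprocess_async
# streams and captures stdout/stderr from an asynchronously launched process,
# e.g. a multi-process accelerate test.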
| 387 |
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return the list of primes below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of all semidivisible numbers not exceeding `limit`."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
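    # The three inner passes above implement inclusion-exclusion on each interval
    # between consecutive prime squares: add multiples of the lower prime, add
    # multiples of the upper prime, then subtract numbers divisible by both
    # (they were counted twice).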
| 493 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Re-split text into one sentence per line (used when computing rougeLsum scores)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 551 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
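# Example (exactly one of the three quantities must be zero; it is solved for):
#   shear_stress(stress=25, tangential_force=100, area=0) -> ("area", 4.0)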
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '[UNK]',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')

            sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 119 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 119 | 1 |
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generates numbers in [0, modulo); the seed advances on every call."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
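# The parameters above (a=1664525, c=1013904223, m=2**32) are the classic
# "Numerical Recipes" LCG constants.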
| 288 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode characters,
    so the byte-level BPE never has to operate on whitespace/control tokens."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
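# Two quick illustrative checks (added; not in the original file): get_pairs
# operates on tuples of symbols, and bytes_to_unicode covers all 256 byte values.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}
assert len(bytes_to_unicode()) == 256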
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
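# Hedged usage sketch (added; not part of the original file). The tiny vocabulary
# and merges below are made up purely for illustration — they are not a real
# BART vocabulary — and the behavior of PreTrainedTokenizer internals can vary
# across transformers versions.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp, "vocab.json")
        merges_path = os.path.join(tmp, "merges.txt")
        with open(vocab_path, "w", encoding="utf-8") as f:
            json.dump(
                {"<s>": 0, "</s>": 1, "<unk>": 2, "<pad>": 3, "<mask>": 4, "l": 5, "o": 6, "w": 7, "lo": 8, "low": 9}, f
            )
        with open(merges_path, "w", encoding="utf-8") as f:
            f.write("#version: 0.2\nl o\nlo w\n")
        tokenizer = BartTokenizer(vocab_path, merges_path)
        print(tokenizer.tokenize("low"))  # expected: ["low"] after the two BPE merges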
| 288 | 1 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 (in joules for SI inputs)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
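# Worked example (added): a 10 kg body moving at 5 m/s carries
# 0.5 * 10 * 5**2 = 125 J; the sign of the velocity does not matter.
assert kinetic_energy(10, 5) == 125.0
assert kinetic_energy(10, -5) == 125.0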
| 344 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
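# Hedged usage sketch (added; the flag values are illustrative, not taken from
# this file):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli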
| 344 | 1 |
"""simple docstring"""
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
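# Worked example (added): 25 = 0b11001 and 32 = 0b100000; padded to the same
# width, 011001 XOR 100000 = 111001 (decimal 57).
assert binary_xor(25, 32) == "0b111001"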
| 215 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 215 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 77 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other, ignoring
    case and whitespace."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character position, increment the count for the first string's
    # character and decrement it for the second string's character.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
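# Worked examples (added): letter counts must cancel exactly for anagrams.
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("hello", "world") is False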
| 304 | 0 |
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths `nums` can form a polygon, i.e. the
    longest side is strictly shorter than the sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
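# Worked examples (added): [3, 4, 5] closes into a polygon since 5 < 3 + 4,
# while [1, 2, 3] degenerates to a straight line since 3 == 1 + 2.
assert check_polygon([3, 4, 5]) is True
assert check_polygon([1, 2, 3]) is False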
| 598 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are both an n-digit number and an nth power
    (e.g. 16807 = 7**5 has exactly 5 digits)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 598 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
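# Shape sanity check (added note; assumes the image processor's default 224x224
# resizing): with 16x16 patches the model sees (224 // 16) ** 2 = 196 patch
# tokens plus 1 CLS token, and the 2 text tokens above bring the expected
# sequence length to 196 + 1 + 2 = 199.
assert (224 // 16) ** 2 + 1 + 2 == 199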
| 444 | '''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None

# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_lowerCamelCase )
lowerCAmelCase_ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_lowerCamelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = '''What is the invoice number?'''
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowerCAmelCase_ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowerCAmelCase_ = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase_ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowerCAmelCase_ = INVOICE_URL
lowerCAmelCase_ = '''What is the invoice number?'''
lowerCAmelCase_ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def UpperCAmelCase_ ( self ):
pass
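# --- Illustrative usage sketch (not part of the tests above; assumes the
# LayoutLM checkpoint exercised above and a pytesseract install for OCR) ---
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# any local path or URL to a document image works here (placeholder path):
result = dqa(image="invoice.png", question="What is the invoice number?", top_k=2)
# each answer carries a score plus start/end word indices, as asserted above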
| 274 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , __A=2 , __A=True , __A=False , __A=10 , __A=3 , __A=32 * 4 , __A=32 * 6 , __A=4 , __A=32 , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = parent
lowerCamelCase : Dict = batch_size
lowerCamelCase : int = is_training
lowerCamelCase : int = use_auxiliary_loss
lowerCamelCase : Dict = num_queries
lowerCamelCase : int = num_channels
lowerCamelCase : List[Any] = min_size
lowerCamelCase : List[str] = max_size
lowerCamelCase : int = num_labels
lowerCamelCase : str = mask_feature_size
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__A )
lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__A )
lowerCamelCase : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__A ) > 0.5
).float()
lowerCamelCase : Any = (torch.rand((self.batch_size, self.num_labels) , device=__A ) > 0.5).long()
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
lowerCamelCase : Tuple = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : str = output.encoder_hidden_states
lowerCamelCase : Union[str, Any] = output.pixel_decoder_hidden_states
lowerCamelCase : List[str] = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__A ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__A ) , config.decoder_config.decoder_layers )
def _snake_case ( self , __A , __A , __A , __A=False ):
"""simple docstring"""
with torch.no_grad():
lowerCamelCase : Dict = MaskFormerModel(config=__A )
model.to(__A )
model.eval()
lowerCamelCase : Tuple = model(pixel_values=__A , pixel_mask=__A )
lowerCamelCase : Optional[Any] = model(__A , output_hidden_states=__A )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__A , __A )
def _snake_case ( self , __A , __A , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : List[Any] = MaskFormerForInstanceSegmentation(config=__A )
model.to(__A )
model.eval()
def comm_check_on_output(__A ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCamelCase : List[Any] = model(pixel_values=__A , pixel_mask=__A )
lowerCamelCase : str = model(__A )
comm_check_on_output(__A )
lowerCamelCase : str = model(
pixel_values=__A , pixel_mask=__A , mask_labels=__A , class_labels=__A )
comm_check_on_output(__A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
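# --- Illustrative shape arithmetic for the head checked above ---
# With the tester defaults (batch_size=2, num_queries=10, num_labels=4,
# min_size=128, max_size=192), the expected output shapes are:
#   masks_queries_logits: (2, 10, 128 // 4, 192 // 4) == (2, 10, 32, 48)
#   class_queries_logits: (2, 10, 4 + 1) == (2, 10, 5)   # +1 for the null class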
@require_torch
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__A : Optional[Any] = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Any = False
__A : Dict = False
__A : Optional[Any] = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = MaskFormerModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , has_text_modality=__A )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__A )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(__A )
lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : int = [*signature.parameters.keys()]
lowerCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCamelCase : Any = MaskFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (self.model_tester.min_size,) * 2
lowerCamelCase : Union[str, Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=__A ),
"mask_labels": torch.randn((2, 10, *size) , device=__A ),
"class_labels": torch.zeros(2 , 10 , device=__A ).long(),
}
lowerCamelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__A )
lowerCamelCase : Tuple = model(**__A )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__A , **__A , output_hidden_states=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__A ).to(__A )
lowerCamelCase : Union[str, Any] = model(**__A , output_attentions=__A )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCamelCase : Tuple = self.all_model_classes[1]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : Dict = model_class(__A )
model.to(__A )
model.train()
lowerCamelCase : Any = model(__A , mask_labels=__A , class_labels=__A ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.all_model_classes[1]
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : str = model_class(__A )
model.to(__A )
model.train()
lowerCamelCase : List[Any] = model(__A , mask_labels=__A , class_labels=__A )
lowerCamelCase : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCamelCase : List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCamelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
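# --- Illustrative sketch of the retain_grad() pattern used above ---
# Non-leaf tensors drop their .grad by default, so it has to be requested
# before calling backward(); a minimal, self-contained example:
import torch

x = torch.randn(2, 3, requires_grad=True)
h = x * 2          # non-leaf (intermediate) tensor
h.retain_grad()    # ask autograd to keep h.grad after backward
h.sum().backward()
assert x.grad is not None and h.grad is not None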
_snake_case = 1E-4
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__A )
lowerCamelCase : Union[str, Any] = self.default_image_processor
lowerCamelCase : int = prepare_img()
lowerCamelCase : Any = image_processor(__A , return_tensors="pt" ).to(__A )
lowerCamelCase : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase : Dict = model(**__A )
lowerCamelCase : Any = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
lowerCamelCase : Tuple = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __A , atol=__A ) )
lowerCamelCase : Dict = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __A , atol=__A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__A )
.eval()
)
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Optional[int] = prepare_img()
lowerCamelCase : List[Any] = image_processor(__A , return_tensors="pt" ).to(__A )
lowerCamelCase : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase : Tuple = model(**__A )
# masks_queries_logits
lowerCamelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase : Tuple = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
lowerCamelCase : Optional[Any] = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
lowerCamelCase : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase : Dict = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(__A )
.eval()
)
lowerCamelCase : List[Any] = self.default_image_processor
lowerCamelCase : int = prepare_img()
lowerCamelCase : Optional[int] = image_processor(__A , return_tensors="pt" ).to(__A )
lowerCamelCase : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__A , (1, 3, 800, 1088) )
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**__A )
# masks_queries_logits
lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCamelCase : Any = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
lowerCamelCase : str = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __A , atol=__A ) )
# class_queries_logits
lowerCamelCase : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCamelCase : Any = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __A , atol=__A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__A )
.eval()
)
lowerCamelCase : int = self.default_image_processor
lowerCamelCase : Optional[int] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
lowerCamelCase : str = inputs["pixel_values"].to(__A )
lowerCamelCase : Tuple = [el.to(__A ) for el in inputs["mask_labels"]]
lowerCamelCase : Optional[int] = [el.to(__A ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**__A )
self.assertTrue(outputs.loss is not None )
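# --- Illustrative inference sketch mirroring the integration tests above ---
# Uses the same checkpoint and fixture image as the tests; the post-processing
# call is the standard transformers API and may differ across library versions.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# target size is (height, width); returns one per-pixel label map per image
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]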
| 231 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''BeitFeatureExtractor''']
_snake_case = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
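# --- Illustrative sketch of the lazy-import pattern used above ---
# transformers' _LazyModule resolves names from _import_structure only on
# first attribute access; a bare-bones version of the same idea:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)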
| 231 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Union[str, Any] = 1
snake_case : Dict = 3
snake_case : Any = (32, 32)
snake_case : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase__ )
return image
@property
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCamelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Any = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : int = self.dummy_cond_unet_upscale
snake_case : Dict = DDPMScheduler()
snake_case : Any = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case : Any = self.dummy_vae
snake_case : Tuple = self.dummy_text_encoder
snake_case : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Optional[Any] = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case : int = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
snake_case : List[Any] = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Optional[Any] = '''A painting of a squirrel eating a burger'''
snake_case : str = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
snake_case : str = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case : int = output.images
snake_case : Optional[int] = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
snake_case : Dict = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase__ , )[0]
snake_case : Optional[Any] = image[0, -3:, -3:, -1]
snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
snake_case : int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case : str = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
snake_case : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : int = self.dummy_cond_unet_upscale
snake_case : List[str] = DDPMScheduler()
snake_case : Tuple = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case : Optional[Any] = self.dummy_vae
snake_case : List[str] = self.dummy_text_encoder
snake_case : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : int = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case : Dict = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
snake_case : List[str] = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Optional[int] = '''A painting of a squirrel eating a burger'''
snake_case : Tuple = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case : List[str] = output.images
assert image.shape[0] == 2
snake_case : Tuple = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
snake_case : Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
snake_case : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
snake_case : List[Any] = self.dummy_cond_unet_upscale
snake_case : str = DDPMScheduler()
snake_case : Tuple = DDIMScheduler(prediction_type='''v_prediction''' )
snake_case : List[str] = self.dummy_vae
snake_case : str = self.dummy_text_encoder
snake_case : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Any = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case : Dict = unet.half()
snake_case : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case : Optional[Any] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
snake_case : str = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Tuple = '''A painting of a squirrel eating a burger'''
snake_case : str = torch.manual_seed(0 )
snake_case : int = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , ).images
snake_case : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
snake_case : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
snake_case : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
snake_case : Tuple = '''a cat sitting on a park bench'''
snake_case : str = torch.manual_seed(0 )
snake_case : Dict = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , )
snake_case : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
snake_case : Optional[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase__ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
snake_case : Optional[int] = '''a cat sitting on a park bench'''
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : Any = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , )
snake_case : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
snake_case : List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase__ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case : Any = '''a cat sitting on a park bench'''
snake_case : int = torch.manual_seed(0 )
snake_case : List[Any] = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , output_type='''np''' , )
snake_case : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
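# --- Illustrative usage sketch for the upscaler exercised above ---
# Same checkpoint and test image as the slow tests; assumes a CUDA device.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-upscale/low_res_cat.png"
)
upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
# a 128x128 input comes back 4x larger, i.e. 512x512, matching the asserts above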
| 638 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = """swin"""
lowerCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Tuple , UpperCamelCase__ : int=224 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : str=3 , UpperCamelCase__ : str=96 , UpperCamelCase__ : List[Any]=[2, 2, 6, 2] , UpperCamelCase__ : Optional[Any]=[3, 6, 12, 24] , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : Tuple=4.0 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : List[Any]=1e-5 , UpperCamelCase__ : Any=32 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : int=None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
snake_case : Any = image_size
snake_case : Optional[int] = patch_size
snake_case : List[Any] = num_channels
snake_case : Union[str, Any] = embed_dim
snake_case : str = depths
snake_case : str = len(UpperCamelCase__ )
snake_case : List[Any] = num_heads
snake_case : List[Any] = window_size
snake_case : Optional[int] = mlp_ratio
snake_case : Union[str, Any] = qkv_bias
snake_case : Optional[int] = hidden_dropout_prob
snake_case : Optional[int] = attention_probs_dropout_prob
snake_case : Optional[int] = drop_path_rate
snake_case : List[Any] = hidden_act
snake_case : int = use_absolute_embeddings
snake_case : str = layer_norm_eps
snake_case : Optional[Any] = initializer_range
snake_case : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case : str = int(embed_dim * 2 ** (len(UpperCamelCase__ ) - 1) )
snake_case : int = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
snake_case ,snake_case : List[Any] = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = version.parse("""1.11""" )
@property
def lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self : List[Any] ) -> float:
"""simple docstring"""
return 1e-4
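# --- Illustrative check of the derived hidden_size set above ---
# hidden_size = embed_dim * 2 ** (len(depths) - 1); with the stock defaults
# (embed_dim=96, depths=[2, 2, 6, 2]) that is 96 * 2**3 = 768. Assuming the
# released transformers SwinConfig:
from transformers import SwinConfig

config = SwinConfig()
assert config.hidden_size == config.embed_dim * 2 ** (len(config.depths) - 1)  # == 768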
| 638 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCAmelCase_ : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase_ ( lowerCamelCase ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
if args.student_type == "roberta":
__magic_name__ : Optional[Any] =False
elif args.student_type == "gpt2":
__magic_name__ : str =False
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
if args.student_type == "roberta":
__magic_name__ : Dict =False
def lowerCAmelCase_ ( ):
__magic_name__ : int =argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=lowerCamelCase , required=lowerCamelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=lowerCamelCase , required=lowerCamelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=lowerCamelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=lowerCamelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=lowerCamelCase , required=lowerCamelCase , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=lowerCamelCase , type=lowerCamelCase , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=lowerCamelCase , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=lowerCamelCase , required=lowerCamelCase , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=lowerCamelCase , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=lowerCamelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=lowerCamelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=lowerCamelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=lowerCamelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=lowerCamelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.1_5 , type=lowerCamelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=lowerCamelCase , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=lowerCamelCase , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=lowerCamelCase , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=lowerCamelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=lowerCamelCase , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=lowerCamelCase , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=lowerCamelCase , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCamelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.0_5 , type=lowerCamelCase , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCamelCase , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=lowerCamelCase , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=lowerCamelCase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=lowerCamelCase , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.0_2 , type=lowerCamelCase , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=lowerCamelCase , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=lowerCamelCase , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=lowerCamelCase , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=lowerCamelCase , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=lowerCamelCase , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=lowerCamelCase , default=4000 , help="""Checkpoint interval.""" )
__magic_name__ : List[Any] =parser.parse_args()
sanity_checks(lowerCamelCase )
# ARGS #
init_gpu_params(lowerCamelCase )
set_seed(lowerCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(lowerCamelCase ) , lowerCamelCase , indent=4 )
git_log(args.dump_path )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] =MODEL_CLASSES[args.student_type]
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__magic_name__ : int =teacher_tokenizer_class.from_pretrained(args.teacher_name )
__magic_name__ : Any ={}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__magic_name__ : List[Any] =tokenizer.all_special_tokens.index(lowerCamelCase )
__magic_name__ : int =tokenizer.all_special_ids[idx]
logger.info(F"Special tokens {special_tok_ids}" )
__magic_name__ : List[str] =special_tok_ids
__magic_name__ : str =tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
with open(args.data_file , """rb""" ) as fp:
__magic_name__ : List[Any] =pickle.load(lowerCamelCase )
if args.mlm:
logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , """rb""" ) as fp:
__magic_name__ : List[str] =pickle.load(lowerCamelCase )
__magic_name__ : List[Any] =np.maximum(lowerCamelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__magic_name__ : Optional[int] =0.0 # do not predict special tokens
__magic_name__ : Tuple =torch.from_numpy(lowerCamelCase )
else:
__magic_name__ : Tuple =None
__magic_name__ : Optional[int] =LmSeqsDataset(params=lowerCamelCase , data=lowerCamelCase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
__magic_name__ : Tuple =student_config_class.from_pretrained(args.student_config )
__magic_name__ : Tuple =True
if args.student_pretrained_weights is not None:
logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}" )
__magic_name__ : List[str] =student_model_class.from_pretrained(args.student_pretrained_weights , config=lowerCamelCase )
else:
__magic_name__ : Optional[int] =student_model_class(lowerCamelCase )
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info("""Student loaded.""" )
# TEACHER #
__magic_name__ : Dict =teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=lowerCamelCase )
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCamelCase , lowerCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCamelCase , lowerCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__magic_name__ : Union[str, Any] =Distiller(
params=lowerCamelCase , dataset=lowerCamelCase , token_probs=lowerCamelCase , student=lowerCamelCase , teacher=lowerCamelCase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
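# --- Illustrative sketch of the MLM masking-weight smoothing used above ---
# token_probs = max(counts, 1) ** -mlm_smoothing, so rarer tokens get a larger
# masking weight; special-token indices are then zeroed, as in the script above.
import numpy as np

counts = np.array([100_000, 100, 1])        # frequent, rare, near-unseen token
weights = np.maximum(counts, 1) ** -0.7     # default --mlm_smoothing is 0.7
weights[0] = 0.0                            # pretend index 0 is a special token
print(weights)                              # [0.0, ~0.0398, 1.0]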
| 367 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCAmelCase_ ( *lowerCamelCase , lowerCamelCase = None , lowerCamelCase=True , lowerCamelCase=2 ):
from .. import __version__
__magic_name__ : Optional[int] =take_from
__magic_name__ : Tuple =()
if not isinstance(args[0] , lowerCamelCase ):
__magic_name__ : List[Any] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCamelCase ).base_version ) >= version.parse(lowerCamelCase ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
__magic_name__ : List[str] =None
if isinstance(lowerCamelCase , lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCamelCase ),)
__magic_name__ : List[str] =F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(lowerCamelCase , lowerCamelCase ):
values += (getattr(lowerCamelCase , lowerCamelCase ),)
__magic_name__ : List[str] =F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
__magic_name__ : List[str] =F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
__magic_name__ : List[Any] =warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , lowerCamelCase , stacklevel=lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ) and len(lowerCamelCase ) > 0:
__magic_name__ : List[Any] =inspect.getouterframes(inspect.currentframe() )[1]
__magic_name__ : Optional[int] =call_frame.filename
__magic_name__ : Tuple =call_frame.lineno
__magic_name__ : str =call_frame.function
__magic_name__ , __magic_name__ : Optional[int] =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(lowerCamelCase ) == 0:
return
elif len(lowerCamelCase ) == 1:
return values[0]
return values
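# --- Illustrative usage sketch for the helper above ---
# The function mirrors diffusers' `deprecate` utility (name de-obfuscated here
# for readability): each positional tuple is (attribute, removal_version,
# message), and `take_from` lets it pop a deprecated kwarg and return its
# value. Hypothetical caller, with a made-up removal version:
def resize(image, **kwargs):
    scale = deprecate("scale", "0.30.0", "Use `size` instead.", take_from=kwargs)
    return scale

# resize(img, scale=2) emits a deprecation warning and returns 2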
| 367 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowercase ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=4 , ) -> List[str]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def _a ( self) -> Optional[int]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self) -> Tuple:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __lowercase ( snake_case__ , unittest.TestCase ):
__UpperCAmelCase = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self) -> Dict:
__snake_case = FlaxAlbertModelTester(self)
@slow
def _a ( self) -> Any:
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('albert-base-v2')
__snake_case = model(np.ones((1, 1)))
self.assertIsNotNone(_UpperCAmelCase)
@require_flax
class __lowercase ( unittest.TestCase ):
@slow
def _a ( self) -> Union[str, Any]:
__snake_case = FlaxAlbertModel.from_pretrained('albert-base-v2')
__snake_case = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
__snake_case = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__snake_case = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase)[0]
__snake_case = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , _UpperCAmelCase)
__snake_case = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4))
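# --- Illustrative inference sketch mirroring the slow test above ---
# Flax models accept numpy inputs directly via return_tensors="np".
import numpy as np
from transformers import AlbertTokenizer, FlaxAlbertModel

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")
inputs = tokenizer("Hello, ALBERT!", return_tensors="np")
hidden_states = model(**inputs)[0]   # shape (1, seq_len, 768), as checked above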
| 313 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
enable_full_determinism()
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : Dict = UNetaDModel
a_ : List[Any] = '''sample'''
@property
def _lowercase ( self ) -> Tuple:
snake_case__ =4
snake_case__ =3
snake_case__ =(32, 32)
snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] ).to(_UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ) -> Optional[int]:
return (3, 32, 32)
@property
def _lowercase ( self ) -> Optional[int]:
return (3, 32, 32)
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ ={
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : Union[str, Any] = UNetaDModel
a_ : Optional[Any] = '''sample'''
@property
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =4
snake_case__ =4
snake_case__ =(32, 32)
snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] ).to(_UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ) -> Optional[int]:
return (4, 32, 32)
@property
def _lowercase ( self ) -> Dict:
return (4, 32, 32)
def _lowercase ( self ) -> str:
snake_case__ ={
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self ) -> Dict:
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_UpperCAmelCase )
snake_case__ =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase )
model.to(_UpperCAmelCase )
snake_case__ =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _lowercase ( self ) -> Optional[Any]:
# by default, model loading uses accelerate, i.e. `low_cpu_mem_usage=True`
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase )
model_accelerate.to(_UpperCAmelCase )
model_accelerate.eval()
snake_case__ =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ =noise.to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] * noise.shape[0] ).to(_UpperCAmelCase )
snake_case__ =model_accelerate(_UpperCAmelCase , _UpperCAmelCase )['sample']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case__ , snake_case__ =UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase , low_cpu_mem_usage=_UpperCAmelCase )
model_normal_load.to(_UpperCAmelCase )
model_normal_load.eval()
snake_case__ =model_normal_load(_UpperCAmelCase , _UpperCAmelCase )['sample']
assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(_UpperCAmelCase )
snake_case__ =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ =noise.to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] * noise.shape[0] ).to(_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample
snake_case__ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case__ =torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 ) )
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : List[str] = UNetaDModel
a_ : Optional[int] = '''sample'''
@property
def _lowercase ( self , _UpperCAmelCase=(32, 32) ) -> Tuple:
snake_case__ =4
snake_case__ =3
snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ) -> Union[str, Any]:
return (3, 32, 32)
@property
def _lowercase ( self ) -> Optional[Any]:
return (3, 32, 32)
def _lowercase ( self ) -> str:
snake_case__ ={
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
@slow
def _lowercase ( self ) -> List[Any]:
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_UpperCAmelCase )
snake_case__ =self.dummy_input
snake_case__ =floats_tensor((4, 3) + (256, 256) ).to(_UpperCAmelCase )
snake_case__ =noise
snake_case__ =model(**_UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(_UpperCAmelCase )
snake_case__ =4
snake_case__ =3
snake_case__ =(256, 256)
snake_case__ =torch.ones((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor(batch_size * [1E-4] ).to(_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample
snake_case__ =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ =torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) )
def _lowercase ( self ) -> List[Any]:
snake_case__ =UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(_UpperCAmelCase )
snake_case__ =4
snake_case__ =3
snake_case__ =(32, 32)
snake_case__ =torch.ones((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor(batch_size * [1E-4] ).to(_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample
snake_case__ =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ =torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) )
def _lowercase ( self ) -> Optional[Any]:
# not required for this model
pass
| 538 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase_ = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def A ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
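# Net effect of the hook above (illustrative note): any collected test that carries
# neither an explicit "integration" nor "unit" marker is marked as a unit test,
# so a selection like `pytest -m unit` also picks up unmarked tests.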
def A ( __UpperCAmelCase ):
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
UpperCAmelCase_ = tmp_path_factory.getbasetemp() / '''cache'''
UpperCAmelCase_ = test_hf_cache_home / '''datasets'''
UpperCAmelCase_ = test_hf_cache_home / '''metrics'''
UpperCAmelCase_ = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(__UpperCamelCase ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(__UpperCamelCase ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(__UpperCamelCase ) )
UpperCAmelCase_ = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(__UpperCamelCase ) )
UpperCAmelCase_ = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__UpperCamelCase ) )
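# The fixture above reroutes every `datasets` cache location into pytest's temporary
# directory, so a test that calls e.g. load_dataset() writes under <tmp>/cache/datasets
# instead of the user's real Hugging Face cache (illustrative note; the <tmp> path is
# whatever tmp_path_factory allocates for the session).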
@pytest.fixture(autouse=__UpperCamelCase , scope='''session''' )
def A ( ):
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCAmelCase ):
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , __UpperCamelCase )
@pytest.fixture
def A ( __UpperCAmelCase ):
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , __UpperCamelCase )
| 713 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCamelCase_ = logging.get_logger(__name__)
def A ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCAmelCase_ = json.loads(__UpperCAmelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCAmelCase_ = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' )
try:
# Parse it and check the field "sagemaker_mpi_enabled".
UpperCAmelCase_ = json.loads(__UpperCAmelCase )
if not mpi_options.get('''sagemaker_mpi_enabled''' , __UpperCAmelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('''smdistributed''' ) is not None
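# Hypothetical illustration of the detection protocol above: model parallelism is
# reported only when all three checks pass, e.g. with
#   SM_HP_MP_PARAMETERS='{"partitions": 2}'                (contains "partitions")
#   SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}'
# and the `smdistributed` package importable in the environment.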
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class a_ ( _snake_case ):
UpperCamelCase__ : str =field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def __a ( self :List[Any]) -> int:
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' , _lowercase , )
@cached_property
def __a ( self :List[Any]) -> "torch.device":
logger.info('''PyTorch: setting up devices''')
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''')
if self.no_cuda:
UpperCAmelCase_ = torch.device('''cpu''')
UpperCAmelCase_ = 0
elif is_sagemaker_model_parallel_available():
UpperCAmelCase_ = smp.local_rank()
UpperCAmelCase_ = torch.device('''cuda''' , _lowercase)
UpperCAmelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta)
UpperCAmelCase_ = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK'''))
UpperCAmelCase_ = torch.device('''cuda''' , self.local_rank)
UpperCAmelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCAmelCase_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
# Sometimes the line in the post-init has not been run before we end up here, so just check that we're not
# at the default value.
UpperCAmelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta)
UpperCAmelCase_ = torch.device('''cuda''' , self.local_rank)
UpperCAmelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(_lowercase)
return device
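# Illustration of the resolution above (hypothetical SageMaker data-parallel worker):
# with SMDATAPARALLEL_LOCAL_RANK=3, local_rank becomes 3, the device resolves to
# torch.device("cuda", 3), and the per-process GPU count is 1.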
@property
def __a ( self :str) -> Dict:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __a ( self :Optional[Any]) -> Optional[int]:
return not is_sagemaker_model_parallel_available()
@property
def __a ( self :List[str]) -> List[Any]:
return False
| 561 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__magic_name__ : Tuple = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class lowercase__ ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = load_tool("""text-question-answering""" )
self.tool.setup()
UpperCamelCase : Dict = load_tool("""text-question-answering""" , remote=_A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.tool(_A , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.remote_tool(_A , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.tool(text=_A , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.remote_tool(text=_A , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Dict = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 233 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __A ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = load_tool('''text-to-speech''' )
self.tool.setup()
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ = self.tool('''hey''' )
lowerCamelCase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ = self.tool('''hey''' )
lowerCamelCase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 712 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a = datasets.logging.get_logger(__name__)
_a = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_a = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_a = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ,__snake_case=False ,__snake_case=True ,__snake_case=False ,__snake_case="dummy_doc" ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = {doc: key_lines}
lowerCamelCase__ = {doc: sys_lines}
lowerCamelCase__ = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ = 0
lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,key_doc_lines[doc] ,__snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ = reader.get_doc_mentions(__snake_case ,sys_doc_lines[doc] ,__snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase__ = reader.set_annotated_parse_trees(__snake_case ,key_doc_lines[doc] ,__snake_case ,__snake_case )
if remove_nested:
lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase__ , lowerCamelCase__ = reader.remove_nested_coref_mentions(__snake_case ,__snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
lowerCamelCase__ = reader.get_mention_assignments(__snake_case ,__snake_case )
lowerCamelCase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = get_coref_infos(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
lowerCamelCase__ = {}
lowerCamelCase__ = 0
lowerCamelCase__ = 0
for name, metric in metrics:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = evaluator.evaluate_documents(__snake_case ,__snake_case ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ) ,F'Recall: {recall * 100:.2f}' ,F' Precision: {precision * 100:.2f}' ,F' F1: {fa * 100:.2f}' ,)
if conll_subparts_num == 3:
lowerCamelCase__ = (conll / 3) * 100
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
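# Worked example of the CoNLL averaging above (hypothetical F1 values):
# muc=0.70, bcub=0.65, ceafe=0.60 -> conll_score = (0.70 + 0.65 + 0.60) / 3 * 100 = 65.0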
def lowerCAmelCase__(__snake_case ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase__ = line.split()[5]
if not parse_col == "-":
lowerCamelCase__ = True
break
else:
break
return has_gold_parse
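# Example of the check above: in the CoNLL line
# "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* ..." the sixth column is
# "(TOP(S(VP*" rather than "-", so the key file is treated as carrying gold
# parse annotation.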
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def __lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
'''simple docstring'''
lowerCamelCase__ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase__ = util.check_gold_parse_annotation(__lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase__ = evaluate(
key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
return score
| 29 | 0 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_lowerCAmelCase : List[str] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase : Optional[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_lowerCAmelCase : Optional[Any] = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowerCAmelCase : Optional[Any] = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowerCAmelCase : Any = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_lowerCAmelCase : Union[str, Any] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def UpperCAmelCase_ ( snake_case__ ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , snake_case__ )
return [m.group(0 ) for m in matches]
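# Quick illustration of the splitter above (hypothetical input):
# camel_case_split("TFAutoModelForCausalLM") -> ["TF", "Auto", "Model", "For", "Causal", "LM"]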
def UpperCAmelCase_ ( ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCAmelCase__ = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCAmelCase__ = collections.defaultdict(snake_case__ )
lowerCAmelCase__ = collections.defaultdict(snake_case__ )
lowerCAmelCase__ = collections.defaultdict(snake_case__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(snake_case__ ):
lowerCAmelCase__ = None
if _re_tf_models.match(snake_case__ ) is not None:
lowerCAmelCase__ = tf_models
lowerCAmelCase__ = _re_tf_models.match(snake_case__ ).groups()[0]
elif _re_flax_models.match(snake_case__ ) is not None:
lowerCAmelCase__ = flax_models
lowerCAmelCase__ = _re_flax_models.match(snake_case__ ).groups()[0]
elif _re_pt_models.match(snake_case__ ) is not None:
lowerCAmelCase__ = pt_models
lowerCAmelCase__ = _re_pt_models.match(snake_case__ ).groups()[0]
if lookup_dict is not None:
while len(snake_case__ ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCAmelCase__ = True
break
# Try again after removing the last word in the name
lowerCAmelCase__ = ''.join(camel_case_split(snake_case__ )[:-1] )
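# e.g. (hypothetical) an attribute prefix "CLIPText" is not a known model
# prefix, so the loop trims the last camel-case word and retries with
# "CLIP", which matches the "clip" model type.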
lowerCAmelCase__ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCAmelCase__ = list(snake_case__ )
all_models.sort()
lowerCAmelCase__ = {'model_type': all_models}
lowerCAmelCase__ = [pt_models[t] for t in all_models]
lowerCAmelCase__ = [tf_models[t] for t in all_models]
lowerCAmelCase__ = [flax_models[t] for t in all_models]
# Now let's find the right processing class (processor, tokenizer or feature extractor) for each model type.
lowerCAmelCase__ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCAmelCase__ = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCAmelCase__ = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCAmelCase__ = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCAmelCase__ = 'AutoTokenizer'
lowerCAmelCase__ = [processors[t] for t in all_models]
return pd.DataFrame(snake_case__ )
def UpperCAmelCase_ ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCAmelCase__ = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
lowerCAmelCase__ = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(snake_case__ , snake_case__ , snake_case__ ):
# The type of pipeline may not exist in this framework
if not hasattr(snake_case__ , snake_case__ ):
continue
# First extract all model_names
lowerCAmelCase__ = []
for name in getattr(snake_case__ , snake_case__ ).values():
if isinstance(snake_case__ , snake_case__ ):
model_names.append(snake_case__ )
else:
model_names.extend(list(snake_case__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
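# After this update the table maps, for example (hypothetical entry),
# "BertForSequenceClassification" -> ("text-classification", "AutoModelForSequenceClassification").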
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
lowerCAmelCase__ = get_frameworks_table()
lowerCAmelCase__ = Dataset.from_pandas(snake_case__ )
lowerCAmelCase__ = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=snake_case__ )
lowerCAmelCase__ = Dataset.from_json(snake_case__ )
lowerCAmelCase__ = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(snake_case__ ) )
}
lowerCAmelCase__ = update_pipeline_and_auto_class_table(snake_case__ )
# Sort the model classes so nondeterministic ordering doesn't create spurious update commits.
lowerCAmelCase__ = sorted(table.keys() )
lowerCAmelCase__ = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
lowerCAmelCase__ = Dataset.from_pandas(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(snake_case__ , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(snake_case__ , 'pipeline_tags.json' ) )
if commit_sha is not None:
lowerCAmelCase__ = (
f'Update with commit {commit_sha}\n\nSee: '
f'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
lowerCAmelCase__ = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=snake_case__ , repo_type='dataset' , token=snake_case__ , commit_message=snake_case__ , )
def UpperCAmelCase_ ( ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCAmelCase__ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCAmelCase__ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCAmelCase__ = pipeline_tasks[key]['pt']
if isinstance(snake_case__ , (list, tuple) ):
lowerCAmelCase__ = model[0]
lowerCAmelCase__ = model.__name__
if model not in in_table.values():
missing.append(snake_case__ )
if len(snake_case__ ) > 0:
lowerCAmelCase__ = ', '.join(snake_case__ )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
_lowerCAmelCase : Any = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 193 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = 'wavlm'
def __init__( self ,a_=32 ,a_=768 ,a_=12 ,a_=12 ,a_=3072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=0.1 ,a_=0.0 ,a_=0.1 ,a_=0.1 ,a_=0.02 ,a_=1e-5 ,a_="group" ,a_="gelu" ,a_=(512, 512, 512, 512, 512, 512, 512) ,a_=(5, 2, 2, 2, 2, 2, 2) ,a_=(10, 3, 3, 3, 3, 2, 2) ,a_=False ,a_=128 ,a_=16 ,a_=320 ,a_=800 ,a_=False ,a_=True ,a_=0.05 ,a_=10 ,a_=2 ,a_=0.0 ,a_=10 ,a_=320 ,a_=2 ,a_=0.1 ,a_=100 ,a_=256 ,a_=256 ,a_=0.1 ,a_="mean" ,a_=False ,a_=False ,a_=256 ,a_=(512, 512, 512, 512, 1500) ,a_=(5, 3, 3, 1, 1) ,a_=(1, 2, 3, 1, 1) ,a_=512 ,a_=80 ,a_=0 ,a_=1 ,a_=2 ,a_=False ,a_=3 ,a_=2 ,a_=3 ,a_=None ,**a_ ,):
"""simple docstring"""
super().__init__(**a_ ,pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = feat_extract_norm
lowerCAmelCase__ = feat_extract_activation
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = conv_bias
lowerCAmelCase__ = num_buckets
lowerCAmelCase__ = max_bucket_distance
lowerCAmelCase__ = num_conv_pos_embeddings
lowerCAmelCase__ = num_conv_pos_embedding_groups
lowerCAmelCase__ = len(self.conv_dim )
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = feat_proj_dropout
lowerCAmelCase__ = final_dropout
lowerCAmelCase__ = layerdrop
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_ctc_classes
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = do_stable_layer_norm
lowerCAmelCase__ = use_weighted_layer_sum
lowerCAmelCase__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ = apply_spec_augment
lowerCAmelCase__ = mask_time_prob
lowerCAmelCase__ = mask_time_length
lowerCAmelCase__ = mask_time_min_masks
lowerCAmelCase__ = mask_feature_prob
lowerCAmelCase__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ = num_codevectors_per_group
lowerCAmelCase__ = num_codevector_groups
lowerCAmelCase__ = contrastive_logits_temperature
lowerCAmelCase__ = num_negatives
lowerCAmelCase__ = codevector_dim
lowerCAmelCase__ = proj_codevector_dim
lowerCAmelCase__ = diversity_loss_weight
# ctc loss
lowerCAmelCase__ = ctc_loss_reduction
lowerCAmelCase__ = ctc_zero_infinity
# adapter
lowerCAmelCase__ = add_adapter
lowerCAmelCase__ = adapter_kernel_size
lowerCAmelCase__ = adapter_stride
lowerCAmelCase__ = num_adapter_layers
lowerCAmelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = list(a_ )
lowerCAmelCase__ = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul ,self.conv_stride ,1 )
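# Worked example of the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320, i.e. one feature
# frame per 320 input samples (20 ms of audio at 16 kHz).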
| 193 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = [10, 20, 30, 40, 50, 60]
A_ = [2, 4, 6, 8, 10, 12]
A_ = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
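# Sanity check of the expected value above: max_weight=100 exceeds the
# total weight 2+4+6+8+10+12 = 42, so every item fits and the profit is
# 10+20+30+40+50+60 = 210.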
def snake_case_ ( self ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """max_weight must greater than zero.""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """Weight can not be negative.""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """Profit can not be negative.""" )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , """max_weight must greater than zero.""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Dict =['''image_processor''', '''tokenizer''']
snake_case__ : str ='''LayoutLMv3ImageProcessor'''
snake_case__ : Optional[Any] =('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self: List[Any] , __a: Any=None , __a: List[str]=None , **__a: Optional[int] )-> Optional[Any]:
lowerCamelCase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __a , )
lowerCamelCase : Optional[Any] = kwargs.pop("""feature_extractor""" )
lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__a , __a )
def __call__( self: str , __a: Optional[Any] , __a: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a: Union[List[List[int]], List[List[List[int]]]] = None , __a: Optional[Union[List[int], List[List[int]]]] = None , __a: bool = True , __a: Union[bool, str, PaddingStrategy] = False , __a: Union[bool, str, TruncationStrategy] = None , __a: Optional[int] = None , __a: int = 0 , __a: Optional[int] = None , __a: Optional[bool] = None , __a: Optional[bool] = None , __a: bool = False , __a: bool = False , __a: bool = False , __a: bool = False , __a: bool = True , __a: Optional[Union[str, TensorType]] = None , **__a: Any , )-> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
lowerCamelCase : Tuple = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
lowerCamelCase : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase : Tuple = features["""words"""]
lowerCamelCase : Dict = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
lowerCamelCase : int = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
lowerCamelCase : Union[str, Any] = self.get_overflowing_images(__a , encoded_inputs["""overflow_to_sample_mapping"""] )
lowerCamelCase : List[str] = images
return encoded_inputs
def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] )-> Optional[int]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCamelCase : Dict = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
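# Illustration of the overflow mapping above: with two input images and
# overflow_to_sample_mapping == [0, 0, 1], the first image is repeated twice
# so that every overflowing chunk of input_ids keeps its source image.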
def a__ ( self: int , *__a: Tuple , **__a: Optional[Any] )-> Dict:
return self.tokenizer.batch_decode(*__a , **__a )
def a__ ( self: Dict , *__a: List[Any] , **__a: Optional[Any] )-> str:
return self.tokenizer.decode(*__a , **__a )
@property
def a__ ( self: Optional[int] )-> int:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def a__ ( self: Union[str, Any] )-> Optional[int]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __a , )
return self.image_processor_class
@property
def a__ ( self: Union[str, Any] )-> Optional[Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __a , )
return self.image_processor
| 222 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def snake_case ( UpperCamelCase__ : Any ) -> Dict:
if "cls_token" in name:
lowerCamelCase : str = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
lowerCamelCase : int = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
lowerCamelCase : str = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase : List[str] = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCamelCase : Optional[int] = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCamelCase : Tuple = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
lowerCamelCase : Union[str, Any] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
lowerCamelCase : Tuple = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
lowerCamelCase : Optional[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCamelCase : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCamelCase : Optional[int] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCamelCase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
lowerCamelCase : List[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
lowerCamelCase : List[Any] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
lowerCamelCase : str = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
lowerCamelCase : List[Any] = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
lowerCamelCase : str = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
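# A condensed, runnable sketch of the key renaming above, assuming each
# `str.replace` result is bound back to `name` (the intent of the original
# conversion script). `_rename_demo` is a hypothetical stand-in and reproduces
# only two of the rules:
def _rename_demo(name: str) -> str:
    if "blocks" in name and "decoder_blocks" not in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    return name

assert _rename_demo("blocks.0.attn.proj.weight") == "vit.encoder.layer.0.attention.output.dense.weight"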
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
lowerCamelCase : Any = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
lowerCamelCase : int = key.split(""".""" )
lowerCamelCase : Dict = int(key_split[1] )
if "decoder_blocks" in key:
lowerCamelCase : List[str] = config.decoder_hidden_size
lowerCamelCase : str = """decoder.decoder_layers."""
if "weight" in key:
lowerCamelCase : Dict = val[:dim, :]
lowerCamelCase : Optional[Any] = val[dim : dim * 2, :]
lowerCamelCase : Tuple = val[-dim:, :]
elif "bias" in key:
lowerCamelCase : Optional[Any] = val[:dim]
lowerCamelCase : Optional[Any] = val[dim : dim * 2]
lowerCamelCase : Optional[Any] = val[-dim:]
else:
lowerCamelCase : Optional[int] = config.hidden_size
lowerCamelCase : Tuple = """vit.encoder.layer."""
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Union[str, Any] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
elif "bias" in key:
lowerCamelCase : Any = val[:dim]
lowerCamelCase : Union[str, Any] = val[dim : dim * 2]
lowerCamelCase : Optional[Any] = val[-dim:]
else:
lowerCamelCase : Dict = val
return orig_state_dict
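# A minimal sketch of the fused-QKV split performed above: for a projection of
# shape (3 * dim, dim), rows [0, dim) are the query weights, [dim, 2 * dim) the
# key weights, and the last dim rows the value weights.
import torch

dim = 4
fused_qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = fused_qkv[:dim, :], fused_qkv[dim : dim * 2, :], fused_qkv[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)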
def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ) -> Any:
lowerCamelCase : Optional[int] = ViTMAEConfig()
if "large" in checkpoint_url:
lowerCamelCase : List[Any] = 1024
lowerCamelCase : Union[str, Any] = 4096
lowerCamelCase : Dict = 24
lowerCamelCase : int = 16
elif "huge" in checkpoint_url:
lowerCamelCase : Dict = 14
lowerCamelCase : int = 1280
lowerCamelCase : Any = 5120
lowerCamelCase : int = 32
lowerCamelCase : Dict = 16
lowerCamelCase : str = ViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )["""model"""]
lowerCamelCase : int = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
lowerCamelCase : Tuple = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
lowerCamelCase : Union[str, Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
lowerCamelCase : str = ViTMAEImageProcessor(size=config.image_size )
lowerCamelCase : Tuple = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : Any = model(**UpperCamelCase__ )
lowerCamelCase : int = outputs.logits
if "large" in checkpoint_url:
lowerCamelCase : List[Any] = torch.tensor(
[[-0.7_3_0_9, -0.7_1_2_8, -1.0_1_6_9], [-1.0_1_6_1, -0.9_0_5_8, -1.1_8_7_8], [-1.0_4_7_8, -0.9_4_1_1, -1.1_9_1_1]] )
elif "huge" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-1.1_5_9_9, -0.9_1_9_9, -1.2_2_2_1], [-1.1_9_5_2, -0.9_2_6_9, -1.2_3_0_7], [-1.2_1_4_3, -0.9_3_3_7, -1.2_2_6_2]] )
else:
lowerCamelCase : int = torch.tensor(
[[-0.9_1_9_2, -0.8_4_8_1, -1.1_2_5_9], [-1.1_3_4_9, -1.0_0_3_4, -1.2_5_9_9], [-1.1_7_5_7, -1.0_4_2_9, -1.2_7_2_6]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
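# Example invocation (the script file name and the output path are hypothetical;
# the defaults above already point at the base MAE visualization checkpoint):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base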
| 222 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaPhonemeCTCTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = False
def __lowerCAmelCase ( self ) ->int:
super().setUp()
SCREAMING_SNAKE_CASE : Any = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=20 , _lowerCamelCase=5 ) ->Tuple[str, list]:
SCREAMING_SNAKE_CASE : List[Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCamelCase )) for i in range(len(_lowerCamelCase ) )]
SCREAMING_SNAKE_CASE : List[str] = list(filter(lambda _lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_lowerCamelCase ) , _lowerCamelCase ) )
if max_length is not None and len(_lowerCamelCase ) > max_length:
SCREAMING_SNAKE_CASE : Union[str, Any] = toks[:max_length]
if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0:
while len(_lowerCamelCase ) < min_length:
SCREAMING_SNAKE_CASE : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE : Tuple = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
if " " not in output_txt and len(_lowerCamelCase ) > 1:
SCREAMING_SNAKE_CASE : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCamelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCamelCase )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE : str = ''' ''' + output_txt
SCREAMING_SNAKE_CASE : str = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
return output_txt, output_ids
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Any:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''m xxx ɪ''' , do_phonemize=_lowerCamelCase ).input_ids
self.assertEqual(_lowerCamelCase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
SCREAMING_SNAKE_CASE : str = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_lowerCamelCase ).input_ids
self.assertEqual(_lowerCamelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''maɪ c''' , do_phonemize=_lowerCamelCase ).input_ids
self.assertEqual(_lowerCamelCase , [3, 200] ) # mai should be <unk> (=3)
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[str] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[str] = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(_lowerCamelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(_lowerCamelCase ).input_ids , tokenizer(_lowerCamelCase , do_phonemize=_lowerCamelCase ).input_ids )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : List[str] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(tokenizer(_lowerCamelCase ).input_ids )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , batch_tokens[0] )
self.assertEqual(_lowerCamelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(_lowerCamelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Optional[int] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Any = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(_lowerCamelCase ).input_ids , tokenizer(_lowerCamelCase , do_phonemize=_lowerCamelCase ).input_ids )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , batch_tokens[0] )
self.assertEqual(_lowerCamelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(_lowerCamelCase , filter_word_delimiter_token=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , batch_tokens[0] )
self.assertEqual(_lowerCamelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : Any = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Any = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(tokenizer(_lowerCamelCase ).input_ids , filter_word_delimiter_token=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE : List[str] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : List[str] = tokenizer.phonemize(_lowerCamelCase , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE : int = tokenizer.decode(tokenizer(_lowerCamelCase ).input_ids , filter_word_delimiter_token=_lowerCamelCase )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = '''Hello how are you'''
SCREAMING_SNAKE_CASE : Dict = tokenizer(_lowerCamelCase , phonemizer_lang='''en-us''' ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(_lowerCamelCase , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = tokenizer.decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(_lowerCamelCase , '''ɛ l o h aʊ a ʁ j u''' )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE : int = '''Hello how Are you'''
SCREAMING_SNAKE_CASE : List[str] = '''hello how are you'''
SCREAMING_SNAKE_CASE : str = tokenizer(_lowerCamelCase ).input_ids
SCREAMING_SNAKE_CASE : List[str] = tokenizer(_lowerCamelCase ).input_ids
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.batch_decode(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __lowerCAmelCase ( _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = [d[key] for d in offsets]
return retrieved_list
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
SCREAMING_SNAKE_CASE : Tuple = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
SCREAMING_SNAKE_CASE : int = tokenizer.decode(_lowerCamelCase , output_char_offsets=_lowerCamelCase , filter_word_delimiter_token=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase , _lowerCamelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(_lowerCamelCase , _lowerCamelCase ):
self.assertTrue(isinstance(_lowerCamelCase , _lowerCamelCase ) )
self.assertTrue(isinstance(outputs_list[0] , _lowerCamelCase ) )
# transform list to ModelOutput
SCREAMING_SNAKE_CASE : List[str] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
                [recursive_check(la , lb ) for la, lb in zip(_lowerCamelCase , _lowerCamelCase )]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
SCREAMING_SNAKE_CASE : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
        # We assume that `decode` works as expected. All we check now is that
        # the output type is correct and that the output matches `decode`'s.
# char
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase , output_char_offsets=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = [tokenizer.decode(_lowerCamelCase , output_char_offsets=_lowerCamelCase ) for ids in sample_ids]
check_list_tuples_equal(_lowerCamelCase , _lowerCamelCase )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : Tuple = len(_lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE : Dict = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE : Dict = tokenizer.add_tokens(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , 0 )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , len(_lowerCamelCase ) )
self.assertEqual(_lowerCamelCase , all_size + len(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_lowerCamelCase )
self.assertGreaterEqual(len(_lowerCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE : str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE : Dict = tokenizer.add_special_tokens(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size
SCREAMING_SNAKE_CASE : Tuple = len(_lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , 0 )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , len(_lowerCamelCase ) )
self.assertEqual(_lowerCamelCase , all_size_a + len(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_lowerCamelCase )
self.assertGreaterEqual(len(_lowerCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
def __lowerCAmelCase ( self ) ->List[str]:
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeCTCTokenizer.
SCREAMING_SNAKE_CASE : int = self.get_tokenizers(fast=_lowerCamelCase , do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE : List[Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_tokens_to_string(_lowerCamelCase )
self.assertIsInstance(output['''text'''] , _lowerCamelCase )
| 333 |
from numpy import exp, pi, sqrt
def UpperCAmelCase_( a__ , a__ = 0.0 , a__ = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
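# A quick numeric check of the density above, written against a de-obfuscated
# stand-in (the original definition reuses the placeholder parameter name `a__`,
# so it is not callable as written): at the mean of a standard normal the density
# is 1 / sqrt(2 * pi) ~= 0.39894.
from numpy import exp, pi, sqrt

def _gaussian_demo(x, mu=0.0, sigma=1.0):
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))

assert abs(_gaussian_demo(0.0) - 0.3989422804014327) < 1e-12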
| 333 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __a ( __lowerCamelCase ):
"""simple docstring"""
_A : str = "deit"
def __init__( self : Dict ,_UpperCamelCase : Dict=7_6_8 ,_UpperCamelCase : List[str]=1_2 ,_UpperCamelCase : Union[str, Any]=1_2 ,_UpperCamelCase : str=3_0_7_2 ,_UpperCamelCase : Dict="gelu" ,_UpperCamelCase : Dict=0.0 ,_UpperCamelCase : Optional[int]=0.0 ,_UpperCamelCase : List[Any]=0.02 ,_UpperCamelCase : str=1e-12 ,_UpperCamelCase : List[Any]=2_2_4 ,_UpperCamelCase : Dict=1_6 ,_UpperCamelCase : Union[str, Any]=3 ,_UpperCamelCase : int=True ,_UpperCamelCase : Any=1_6 ,**_UpperCamelCase : int ,) -> int:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =hidden_size
SCREAMING_SNAKE_CASE__ =num_hidden_layers
SCREAMING_SNAKE_CASE__ =num_attention_heads
SCREAMING_SNAKE_CASE__ =intermediate_size
SCREAMING_SNAKE_CASE__ =hidden_act
SCREAMING_SNAKE_CASE__ =hidden_dropout_prob
SCREAMING_SNAKE_CASE__ =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ =initializer_range
SCREAMING_SNAKE_CASE__ =layer_norm_eps
SCREAMING_SNAKE_CASE__ =image_size
SCREAMING_SNAKE_CASE__ =patch_size
SCREAMING_SNAKE_CASE__ =num_channels
SCREAMING_SNAKE_CASE__ =qkv_bias
SCREAMING_SNAKE_CASE__ =encoder_stride
class __a ( __lowerCamelCase ):
"""simple docstring"""
_A : Optional[int] = version.parse("1.11" )
@property
def __A ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __A ( self : Optional[Any] ) -> float:
'''simple docstring'''
return 1e-4
| 151 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
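# The guarded-import pattern used throughout this module, as a standalone sketch:
# probe for an optional backend and fall back to a dummy object that raises a
# helpful error only when it is actually used. (Names below are illustrative
# stand-ins, not the real diffusers internals.)
import importlib.util

def _has_backend(name: str) -> bool:
    return importlib.util.find_spec(name) is not None

if not _has_backend("torch"):
    class DDIMScheduler:  # dummy placeholder, mirroring dummy_pt_objects
        def __init__(self, *args, **kwargs):
            raise ImportError("DDIMScheduler requires the PyTorch backend (pip install torch).")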
| 151 | 1 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__magic_name__ = 6_3_7_8_1_3_7.0
__magic_name__ = 6_3_5_6_7_5_2.3_1_4_2_4_5
__magic_name__ = 6_378_137
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float):
A_ : List[Any] = (AXIS_A - AXIS_B) / AXIS_A
A_ : int = atan((1 - flattening) * tan(radians(lowerCamelCase)))
A_ : List[str] = atan((1 - flattening) * tan(radians(lowerCamelCase)))
A_ : int = radians(lowerCamelCase)
A_ : List[Any] = radians(lowerCamelCase)
# Equation
A_ : Any = sin((phi_a - phi_a) / 2)
A_ : int = sin((lambda_a - lambda_a) / 2)
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
A_ : List[str] = sqrt(sin_sq_phi + (cos(lowerCamelCase) * cos(lowerCamelCase) * sin_sq_lambda))
return 2 * RADIUS * asin(lowerCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
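# Sanity checks against a de-obfuscated stand-in of the function above (the
# original reuses placeholder names for its parameters and constants, so the
# WGS84 constants and the body are restated here under illustrative names):
from math import asin, atan, cos, pi, radians, sin, sqrt, tan

_AXIS_A, _AXIS_B, _RADIUS = 6_378_137.0, 6_356_752.314245, 6_378_137

def _haversine_demo(lat1, lon1, lat2, lon2):
    flattening = (_AXIS_A - _AXIS_B) / _AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1, lambda_2 = radians(lon1), radians(lon2)
    h = sqrt(sin((phi_2 - phi_1) / 2) ** 2 + cos(phi_1) * cos(phi_2) * sin((lambda_2 - lambda_1) / 2) ** 2)
    return 2 * _RADIUS * asin(h)

# zero distance for identical points, and a quarter of the equator is R * pi / 2
assert _haversine_demo(48.8566, 2.3522, 48.8566, 2.3522) == 0.0
assert abs(_haversine_demo(0.0, 0.0, 0.0, 90.0) - _RADIUS * pi / 2) < 1e-6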
| 27 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
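# The backwards-compatibility branch in __init__ above splits a "|"-delimited
# string into a list; a minimal runnable sketch of that parsing:
pos_att_type = "p2c|c2p"
if type(pos_att_type) == str:
    pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
assert pos_att_type == ["p2c", "c2p"]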
| 27 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is
    # no longer needed, at the cost of slower execution, as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A : Dict = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : Tuple = PegasusConfig
a : Any = {}
a : Dict = 'gelu'
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , ):
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = eos_token_id
_lowercase = pad_token_id
_lowercase = bos_token_id
def UpperCamelCase_ ( self ):
_lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_lowercase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase = prepare_pegasus_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def UpperCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
_lowercase = 20
_lowercase = model_class_name(lowerCAmelCase__ )
_lowercase = model.encode(inputs_dict["""input_ids"""] )
_lowercase , _lowercase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowercase = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
_lowercase = model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def UpperCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
_lowercase = 20
_lowercase = model_class_name(lowerCAmelCase__ )
_lowercase = model.encode(inputs_dict["""input_ids"""] )
_lowercase , _lowercase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_lowercase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_lowercase = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_lowercase = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
_lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
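# The cache checks above decode the whole prefix in one call and then feed only the
# final token together with past_key_values; the prefix position ids are built by
# broadcasting an arange over the batch. A toy check of that indexing (requires jax):
import jax.numpy as jnp

decoder_input_ids = jnp.ones((2, 5), dtype="i4")
decoder_position_ids = jnp.broadcast_to(
    jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
    (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
assert decoder_position_ids.shape == (2, 4)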
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ) -> Any:
if attention_mask is None:
_lowercase = np.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_lowercase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
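# What the mask helper above computes, on a toy batch, assuming `np.inta` stands
# for np.int8: attention masks are 1 wherever the token differs from the pad id,
# and the decoder mask always keeps the first (decoder-start) position.
import numpy as np

pad_token_id = 0
decoder_input_ids = np.array([[0, 5, 7, 0]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
assert decoder_attention_mask.tolist() == [[1, 1, 1, 0]]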
@require_flax
class a_ ( _a , unittest.TestCase ):
a : Optional[int] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Optional[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Union[str, Any] = True
a : Optional[int] = False
a : Any = False
a : List[Any] = False
def UpperCamelCase_ ( self ):
_lowercase = FlaxPegasusModelTester(self )
_lowercase = ConfigTester(self , config_class=lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase_ ( self ):
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_lowercase = model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(__UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest("""JIT Enabled""" ):
_lowercase = encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowercase = encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase_ ( self ):
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase = model_class(lowerCAmelCase__ )
_lowercase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_lowercase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest("""JIT Enabled""" ):
_lowercase = decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowercase = decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase_ ( self ):
for model_class_name in self.all_model_classes:
_lowercase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowerCAmelCase__ )
_lowercase = np.ones((1, 1) )
_lowercase = model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@slow
def UpperCamelCase_ ( self ):
_lowercase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_lowercase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_lowercase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_lowercase = [
"""California\'s largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.""",
]
_lowercase = tokenizer(lowerCAmelCase__ , return_tensors="""np""" , truncation=lowerCAmelCase__ , max_length=512 , padding=lowerCAmelCase__ )
_lowercase = model.generate(**lowerCAmelCase__ , num_beams=2 ).sequences
_lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        assert tgt_text == decoded
| 287 |
'''simple docstring'''
from __future__ import annotations
def a__ ( lowercase : list, lowercase : int ) -> List[Any]:
"""simple docstring"""
if len(lowercase ) <= 1 or n <= 1:
return
insert_next(lowercase, n - 1 )
rec_insertion_sort(lowercase, n - 1 )
def a__ ( lowercase : list, lowercase : int ) -> Optional[Any]:
"""simple docstring"""
if index >= len(lowercase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_UpperCamelCase , _UpperCamelCase = (
collection[index],
collection[index - 1],
)
insert_next(lowercase, index + 1 )
if __name__ == "__main__":
lowercase__ : str = input('Enter integers separated by spaces: ')
lowercase__ : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
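# A runnable stand-in for the recursive insertion sort above (the original defs
# reuse obfuscated names, so both helpers are restated under their intended names):
def insert_next(collection: list, index: int) -> None:
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap adjacent out-of-order elements, then keep pushing the larger one right
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)

def rec_insertion_sort(collection: list, n: int) -> None:
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)

data = [3, 1, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3]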
| 98 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='CLIPImageProcessor'
__a =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self : str , __a : List[Any]=None , __a : Any=None , **__a : int ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : List[Any] , __a : Optional[int]=None , __a : Dict=None , __a : List[Any]=None , **__a : Dict ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_a = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : Tuple , *__a : List[Any] , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[Any] , *__a : Union[str, Any] , **__a : Optional[Any] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
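# Typical call pattern for this processor (which pairs a CLIPImageProcessor with an
# XLM-RoBERTa tokenizer, as in AltCLIP). Shown as comments only, since it needs
# network access and pretrained weights; the checkpoint name is an assumption:
#   from transformers import AltCLIPProcessor
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")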
| 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='convbert'
def __init__( self : Tuple , __a : Any=3_05_22 , __a : Union[str, Any]=7_68 , __a : Optional[int]=12 , __a : Optional[Any]=12 , __a : List[str]=30_72 , __a : Tuple="gelu" , __a : Any=0.1 , __a : Optional[Any]=0.1 , __a : Union[str, Any]=5_12 , __a : Tuple=2 , __a : int=0.02 , __a : List[Any]=1e-1_2 , __a : int=1 , __a : Tuple=0 , __a : int=2 , __a : List[str]=7_68 , __a : Any=2 , __a : Union[str, Any]=9 , __a : Tuple=1 , __a : str=None , **__a : int , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = embedding_size
_a = head_ratio
_a = conv_kernel_size
_a = num_groups
_a = classifier_dropout
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
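# What the ONNX input spec above resolves to for the default (non-multiple-choice)
# task: batch and sequence are dynamic axes on every input. A small runnable check:
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
assert list(onnx_inputs) == ["input_ids", "attention_mask", "token_type_ids"]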
| 521 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is
    # no longer needed, at the cost of slower execution, as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__snake_case ="""platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase_ :
lowerCamelCase : str = PegasusConfig
lowerCamelCase : List[Any] = {}
lowerCamelCase : Union[str, Any] = '''gelu'''
def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str=1_3 , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Dict=9_9 , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : int=3_7 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=2_0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Optional[int]=0 , ) -> Optional[int]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = bos_token_id
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase = prepare_pegasus_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Any:
lowerCAmelCase = 2_0
lowerCAmelCase = model_class_name(UpperCAmelCase__ )
lowerCAmelCase = model.encode(inputs_dict['input_ids'] )
lowerCAmelCase , lowerCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
lowerCAmelCase = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the attention mask out to the cache length before decoding.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
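

# Illustrative helper (an addition, not part of the original test file): with
# pad_token_id=1, pad positions get a 0 in the attention mask, and the first
# decoder position is always kept.
def _inputs_dict_example() -> None:
    class _Cfg:
        pad_token_id = 1

    batch = prepare_pegasus_inputs_dict(_Cfg(), np.array([[5, 6, 1]]), np.array([[1, 5, 1]]))
    assert batch["attention_mask"].tolist() == [[1, 1, 0]]
    assert batch["decoder_attention_mask"].tolist() == [[1, 1, 0]]
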
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 133 |
"""Highest Response Ratio Next (HRRN) scheduling."""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the process has not finished; if it is 1, it has.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process and fast-forward the clock if needed.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
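

# Quick sanity check (an illustrative addition, not in the original module): with a
# single process there is no waiting, so turn-around time equals burst time. HRRN
# picks the ready process with the highest response ratio, (waiting + burst) / burst.
def _hrrn_smoke_test() -> None:
    turn_around = calculate_turn_around_time(["P1"], [0], [4], 1)
    assert turn_around == [4]
    assert calculate_waiting_time(["P1"], turn_around, [4], 1) == [0]
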
if __name__ == "__main__":
__snake_case =5
__snake_case =["""A""", """B""", """C""", """D""", """E"""]
__snake_case =[1, 2, 3, 4, 5]
__snake_case =[1, 2, 3, 4, 5]
__snake_case =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__snake_case =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 133 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 626 |
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (target) can be constructed from
    the given list of substrings (word_bank)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
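

# Small worked example (an illustrative addition, not in the original module): for
# target "ab" and word_bank ["a", "b", "ab"], table[2] collects both decompositions,
# with the single-word construction discovered first.
def _all_construct_example() -> None:
    assert all_construct("ab", ["a", "b", "ab"]) == [["ab"], ["a", "b"]]
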
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 626 | 1 |
"""Convert S3PRL (wav2vec2-based) downstream checkpoints to HF Transformers."""
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 288 |
"""Count the ways an integer can be written as a sum of distinct natural-number powers."""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
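

# Worked example (an illustrative addition, not in the original module): 13 has
# exactly one representation as a sum of distinct squares, 2**2 + 3**2.
def _power_sum_example() -> None:
    assert solve(13, 2) == 1
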
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 1 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowerCamelCase = False
try:
lowerCamelCase = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
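

# Hypothetical usage sketch (an addition, not part of the original module): render
# a small menu and return the selected index. Requires an interactive terminal
# (or the Colab text fallback).
def _demo_menu() -> int:
    menu = BulletMenu("Which framework?", ["pytorch", "tensorflow", "jax"])
    return menu.run(default_choice=0)
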
| 705 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
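

# Illustrative check (an addition, not in the original script) of how the renaming
# behaves on a toy key: "text_branch" maps to "text_model" and "sequential.3." is
# collapsed to "layers.1.linear." (3 // 3 == 1).
def _rename_example() -> None:
    toy = {"text_branch.sequential.3.weight": torch.zeros(2, 2)}
    assert "text_model.layers.1.linear.weight" in rename_state_dict(toy)
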
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 12 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
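

# Illustrative usage (an addition, not in the original module): build 1 <-> 2 <-> 3,
# then unlink the middle node.
def _linked_list_demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert str(linked_list) == "1 2 3"
    linked_list.delete_value(2)
    assert str(linked_list) == "1 3"
    assert 2 not in linked_list
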
def create_linked_list() -> None:
    """Placeholder for the module's doctest examples."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 427 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase_ ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int ) -> bool:
"""simple docstring"""
if len(lowerCAmelCase__ ) == 0:
return False
lowerCAmelCase_ : Union[str, Any] = len(lowerCAmelCase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowerCAmelCase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowerCAmelCase__ )
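

# Illustrative checks (an addition, not in the original module). The list must
# already be sorted in ascending order for the halving step to be valid.
def _binary_search_example() -> None:
    data = [1, 3, 5, 7, 9]
    assert binary_search(data, 7) is True
    assert binary_search(data, 4) is False
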
if __name__ == "__main__":
lowercase__ : Dict = input("""Enter numbers separated by comma:\n""").strip()
lowercase__ : str = [int(item.strip()) for item in user_input.split(""",""")]
lowercase__ : int = int(input("""Enter the number to be found in the list:\n""").strip())
lowercase__ : Dict = """""" if binary_search(sequence, target) else """not """
print(f'{target} was {not_str}found in {sequence}')
| 317 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 317 | 1 |
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
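

# Illustrative check (an addition, not in the original module), using the classic
# example: days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] can be covered for
# 11 (a 7-day pass on day 1, then 1-day passes on days 8 and 20).
def _mincost_tickets_example() -> None:
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
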
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
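

# Hypothetical usage sketch (an addition, not part of the original module); assumes
# an existing SparkSession named `spark`:
#
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, split="train", streaming=False).read()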
| 647 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 714 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a :
def __init__( self : Optional[int], SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : List[Any]=2, SCREAMING_SNAKE_CASE_ : str=32, SCREAMING_SNAKE_CASE_ : Union[str, Any]=16, SCREAMING_SNAKE_CASE_ : str=3, SCREAMING_SNAKE_CASE_ : Union[str, Any]=True, SCREAMING_SNAKE_CASE_ : int=True, SCREAMING_SNAKE_CASE_ : Optional[Any]=32, SCREAMING_SNAKE_CASE_ : Any=4, SCREAMING_SNAKE_CASE_ : Dict=[0, 1, 2, 3], SCREAMING_SNAKE_CASE_ : Dict=4, SCREAMING_SNAKE_CASE_ : Union[str, Any]=37, SCREAMING_SNAKE_CASE_ : str="gelu", SCREAMING_SNAKE_CASE_ : Optional[int]=0.1, SCREAMING_SNAKE_CASE_ : List[Any]=0.1, SCREAMING_SNAKE_CASE_ : Dict=0.02, SCREAMING_SNAKE_CASE_ : int=3, SCREAMING_SNAKE_CASE_ : List[str]=[1, 3_84, 24, 24], SCREAMING_SNAKE_CASE_ : Optional[int]=True, SCREAMING_SNAKE_CASE_ : Optional[int]=None, ):
snake_case : int = parent
snake_case : Tuple = batch_size
snake_case : List[str] = image_size
snake_case : Union[str, Any] = patch_size
snake_case : Dict = num_channels
snake_case : int = is_training
snake_case : Any = use_labels
snake_case : Any = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Tuple = backbone_out_indices
snake_case : Dict = num_attention_heads
snake_case : Optional[Any] = intermediate_size
snake_case : Any = hidden_act
snake_case : List[str] = hidden_dropout_prob
snake_case : List[str] = attention_probs_dropout_prob
snake_case : Tuple = initializer_range
snake_case : Any = num_labels
snake_case : Union[str, Any] = backbone_featmap_shape
snake_case : Optional[int] = scope
snake_case : Tuple = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case : Dict = (image_size // patch_size) ** 2
snake_case : Any = num_patches + 1
def __snake_case ( self : List[Any] ):
snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Union[str, Any] = None
if self.use_labels:
snake_case : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : List[str] ):
snake_case : Optional[int] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=SCREAMING_SNAKE_CASE_, backbone_featmap_shape=self.backbone_featmap_shape, )
def __snake_case ( self : List[Any], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : Any ):
snake_case : Dict = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : Optional[Any] ):
snake_case : int = self.num_labels
snake_case : Any = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size) )
def __snake_case ( self : Optional[int], SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : List[Any], SCREAMING_SNAKE_CASE_ : List[Any] ):
snake_case : List[str] = self.num_labels
snake_case : List[Any] = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __snake_case ( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class a ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
_snake_case = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_snake_case = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
def __snake_case ( self : Any ):
snake_case : Optional[Any] = DPTModelTester(self )
snake_case : Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __snake_case ( self : int ):
pass
def __snake_case ( self : List[str] ):
snake_case, snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Any = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_, nn.Linear ) )
def __snake_case ( self : Tuple ):
snake_case, snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[int] = [*signature.parameters.keys()]
snake_case : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple ):
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict ):
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case, snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Dict = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
snake_case : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
snake_case : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case ( self : int ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case, snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Dict = False
snake_case : Dict = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
snake_case : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
snake_case : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case ( self : Tuple ):
snake_case, snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Union[str, Any] = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
snake_case : List[str] = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
snake_case : Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case : List[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __snake_case ( self : Union[str, Any] ):
pass
@slow
def __snake_case ( self : List[Any] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case : int = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case, snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : int = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
snake_case : Any = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
@slow
class a ( unittest.TestCase ):
def __snake_case ( self : Dict ):
snake_case : Dict = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
snake_case : int = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
snake_case : str = prepare_img()
snake_case : List[str] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
snake_case : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = outputs.predicted_depth
# verify the predicted depth
snake_case : List[str] = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape, SCREAMING_SNAKE_CASE_ )
snake_case : Dict = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00, SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
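
Condensed from the slow integration test above, a standalone depth-estimation sketch (the checkpoint name and fixture path come from the test; the rest is standard transformers usage, not the test code itself):

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # (1, 384, 384) for this checkpoint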
| 555 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ) -> Any:
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCAmelCase ( self ) -> int:
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase_ , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_A = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self ) -> str:
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
assert image.shape[0] == 2
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_A = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.dummy_cond_unet_upscale
_A = DDPMScheduler()
_A = DDIMScheduler(prediction_type="""v_prediction""" )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_A = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_A = unet.half()
_A = text_encoder.half()
# make sure here that pndm scheduler skips prk
_A = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_50 , )
_A = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = """A painting of a squirrel eating a burger"""
_A = torch.manual_seed(0 )
_A = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , ).images
_A = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCAmelCase ( self ) -> List[str]:
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_A = """stabilityai/stable-diffusion-x4-upscaler"""
_A = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = """a cat sitting on a park bench"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , output_type="""np""" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
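
For reference, the fp16 end-to-end path exercised by the slow tests above maps onto this standalone sketch (the checkpoint and image URL come from the tests; the rest is standard diffusers usage rather than the test code itself):

import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-upscale/low_res_cat.png"
)
# The output is upscaled 4x relative to the low-resolution input.
image = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]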
| 401 |

import tensorflow as tf
from ...tf_utils import shape_list
class a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase_ )
_A = vocab_size
_A = d_embed
_A = d_proj
_A = cutoffs + [vocab_size]
_A = [0] + self.cutoffs
_A = div_val
_A = self.cutoffs[0]
_A = len(self.cutoffs ) - 1
_A = self.shortlist_size + self.n_clusters
_A = keep_order
_A = []
_A = []
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
if self.n_clusters > 0:
_A = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name="""cluster_weight""" )
_A = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_A = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_A = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , )
_A = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = self.d_embed // (self.div_val**i)
_A = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' )
self.out_projs.append(lowerCAmelCase_ )
_A = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , )
_A = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[Any]:
_A = x
if proj is not None:
_A = tf.einsum("""ibd,ed->ibe""" , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum("""ibd,nd->ibn""" , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_A = shape_list(lowerCAmelCase_ )
_A = tf.range(lp_size[0] , dtype=target.dtype )
_A = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ) -> Optional[Any]:
_A = 0
if self.n_clusters == 0:
_A = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_A = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_A = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
_A = shape_list(lowerCAmelCase_ )
_A = []
_A = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_A = (target >= l_idx) & (target < r_idx)
_A = tf.where(lowerCAmelCase_ )
_A = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_A = self.out_layers[0][0][l_idx:r_idx]
_A = self.out_layers[0][1][l_idx:r_idx]
else:
_A = self.out_layers[i][0]
_A = self.out_layers[i][1]
if i == 0:
_A = tf.concat([cur_W, self.cluster_weight] , 0 )
_A = tf.concat([cur_b, self.cluster_bias] , 0 )
_A = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_A = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_A = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_A = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_A = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_A = tf.nn.log_softmax(lowerCAmelCase_ )
_A = self.cutoffs[0] + i - 1 # No probability for the head cluster
_A = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_A = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_A = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_A = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_A = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
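
The layer above is an adaptive softmax: tokens below the first cutoff form a cheap "head" that is scored directly, while rarer tokens are reached through a cluster logit plus a per-cluster tail softmax. A toy illustration of how the cutoffs partition the vocabulary (the numbers are hypothetical):

vocab_size = 10_000
cutoffs = [500, 2_000]
cutoff_ends = [0] + cutoffs + [vocab_size]             # [0, 500, 2000, 10000]
buckets = list(zip(cutoff_ends[:-1], cutoff_ends[1:]))
print(buckets)  # [(0, 500), (500, 2000), (2000, 10000)]
# head: ids 0-499 scored in one matmul; each tail cluster adds its cluster
# logit to the tail log-probabilities, mirroring head_logprob[..., cluster_prob_idx] above.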
| 401 | 1 |
'''simple docstring'''
import numpy as np
def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    '''simple docstring'''
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
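
A worked example of the exponential linear unit above (a hypothetical check; values truncated):

import numpy as np

print(elu(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))
# [ 2.3       0.6      -0.259399 -0.293289]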
| 720 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self) -> None:
        '''simple docstring'''
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self) -> None:
        '''simple docstring'''
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 411 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=32 , lowercase=3 , lowercase=4 , lowercase=[10, 20, 30, 40] , lowercase=[2, 2, 3, 2] , lowercase=True , lowercase=True , lowercase=37 , lowercase="gelu" , lowercase=10 , lowercase=0.02 , lowercase=["stage2", "stage3", "stage4"] , lowercase=[2, 3, 4] , lowercase=None , ) -> Tuple:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_stages
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = intermediate_size
A__ = hidden_act
A__ = num_labels
A__ = initializer_range
A__ = out_features
A__ = out_indices
A__ = scope
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = ConvNextModel(config=lowercase )
model.to(lowercase )
model.eval()
A__ = model(lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = ConvNextForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A__ = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
A__ = ConvNextBackbone(config=lowercase )
model.to(lowercase )
model.eval()
A__ = model(lowercase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A__ = None
A__ = ConvNextBackbone(config=lowercase )
model.to(lowercase )
model.eval()
A__ = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = ConvNextModelTester(self )
A__ = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A__ = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(lowercase , lowercase ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ConvNextModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(lowercase )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=lowercase , return_tensors="pt" ).to(lowercase )
# forward pass
with torch.no_grad():
A__ = model(**lowercase )
# verify the logits
A__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A__ = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) )
@require_torch
class a__ ( unittest.TestCase , snake_case ):
"""simple docstring"""
__lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
__lowerCamelCase = ConvNextConfig
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = ConvNextModelTester(self )
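
The integration test earlier in this snippet corresponds to the following standalone classification sketch (the checkpoint and fixture path come from the test; the id2label lookup is a common convenience, not part of the test):

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])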
| 514 |
import torch


def main():
    '''simple docstring'''
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )


if __name__ == "__main__":
    main()
| 514 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=_lowercase ):
__magic_name__ : Dict = ["torch", "transformers", "onnx"]
def __init__(self : List[str], *__UpperCAmelCase : Dict, **__UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Optional[Any], *__UpperCAmelCase : Tuple, **__UpperCAmelCase : int ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Any, *__UpperCAmelCase : Tuple, **__UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["torch", "transformers", "onnx"]
def __init__(self : Dict, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Any, *__UpperCAmelCase : Tuple, **__UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : int, *__UpperCAmelCase : List[Any], **__UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["torch", "transformers", "onnx"]
def __init__(self : Dict, *__UpperCAmelCase : Dict, **__UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Tuple, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Union[str, Any], *__UpperCAmelCase : List[str], **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["torch", "transformers", "onnx"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : str, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : int, *__UpperCAmelCase : List[Any], **__UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["torch", "transformers", "onnx"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : List[str], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Any, *__UpperCAmelCase : List[str], **__UpperCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Dict = ["torch", "transformers", "onnx"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Tuple, *__UpperCAmelCase : Any, **__UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase__ (cls : Optional[Any], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''] )
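
These placeholders let "from diffusers import X" succeed even when torch, transformers, or onnx is missing, deferring the error to first use. A simplified sketch of the metaclass trick behind them (not the exact library implementation):

class DummyObject(type):
    # Any public attribute access on the class raises a helpful ImportError.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the backends: {cls._backends}")


class SomePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]


# SomePipeline.from_pretrained("...")  -> ImportError naming the missing backends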
| 700 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Optional[Any], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : int, **__UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : List[str], *__UpperCAmelCase : str, **__UpperCAmelCase : List[Any] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[str] = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Union[str, Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : List[Any], **__UpperCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : int, **__UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : str, *__UpperCAmelCase : str, **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[str] = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : List[str], **__UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Tuple, **__UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : str, *__UpperCAmelCase : Optional[int], **__UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : int, **__UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Any, *__UpperCAmelCase : str, **__UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Any, **__UpperCAmelCase : str ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : str = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[int] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Dict, **__UpperCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Tuple = ["sentencepiece"]
def __init__(self : List[str], *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : int, *__UpperCAmelCase : Union[str, Any], **__UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Union[str, Any], *__UpperCAmelCase : Tuple, **__UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Any = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : str ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Dict, *__UpperCAmelCase : Any, **__UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : Optional[Any] = ["sentencepiece"]
def __init__(self : List[Any], *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> Any:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : int = ["sentencepiece"]
def __init__(self : Tuple, *__UpperCAmelCase : Optional[Any], **__UpperCAmelCase : int ) -> int:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
class a__ ( metaclass=_lowercase ):
__magic_name__ : List[Any] = ["sentencepiece"]
def __init__(self : Optional[int], *__UpperCAmelCase : str, **__UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
requires_backends(self, ['''sentencepiece'''] )
| 355 | 0 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
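
Two worked calls of the function above; the zeroed argument selects which quantity is solved for:

print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}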
| 260 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def read( self ):
        """simple docstring"""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
| 260 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f'Done. Image saved to disk as {file_name}.')
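
One robustness note: a page without an og:image meta tag makes the lookup above raise a bare TypeError. A guarded variant (an improvement sketch, not in the original script):

meta = soup.find('meta', {'property': 'og:image'})
if meta is None or not meta.get('content'):
    raise SystemExit('No og:image meta tag found on the page')
image_url = meta['content']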
| 342 |

"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ), ord('''~''' ) + 1)) + list(range(ord('''¡''' ), ord('''¬''' ) + 1)) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
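
To make the BPE merge loop below concrete, here is get_pairs on a toy symbol sequence (a worked example, not in the original file). The bpe() method repeatedly merges the lowest-ranked pair until none of the remaining pairs appear in self.bpe_ranks.

print(get_pairs(("l", "o", "w", "e", "r")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}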
class UpperCAmelCase_ ( _lowercase):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str]="replace" , __UpperCamelCase : Any="<s>" , __UpperCamelCase : List[str]="</s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Any="<s>" , __UpperCamelCase : Tuple="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : List[Any]=False , **__UpperCamelCase : Optional[int] , ) -> Optional[Any]:
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCamelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase = json.load(__UpperCamelCase )
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
_UpperCamelCase = errors # how to handle errors in decoding
_UpperCamelCase = bytes_to_unicode()
_UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
_UpperCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCamelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCamelCase = {}
_UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ) -> dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token: str ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # greedily pick the lowest-ranked known merge; unseen pairs rank as +inf
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text: str ) -> list:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token: str ) -> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index: int ) -> str:
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens: list ) -> str:
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text: str , is_split_into_words: bool = False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
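    # Worked example (added; values are hypothetical): with padding_side="right"
    # and inputs {"input_ids": [0, 9, 2], "global_attention_mask": [1, 0, 0]},
    # padding input_ids to length 5 leaves the mask one step behind, so the
    # override above extends it with -1 to [1, 0, 0, -1, -1] -- padded positions
    # are marked as neither local- nor global-attention tokens.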
| 342 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text( text: str ) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
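# Quick sanity sketch (added): for the toy string "abb", analyze_text returns
#   single_char_strings == {"a": 1, "b": 2}
#   two_char_strings    == {" a": 1, "ab": 1, "bb": 1}
# which is exactly what calculate_prob above feeds into the entropy sums.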
def main() -> None:
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 100 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize('''你好[SEP]你是谁''' )
        self.assertListEqual(tokens , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
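    # Note (added): RoCBert keeps three parallel vocabularies, so one token maps
    # to a token id, a shape id and a pronunciation id. A hedged sketch of the
    # round trip, reusing the tiny fixture vocab built in setUp:
    #   tokenizer.convert_tokens_to_ids(["你", "好"])                # [5, 6]
    #   tokenizer.convert_tokens_to_shape_ids(["你", "好"])          # [5, 6]
    #   tokenizer.convert_tokens_to_pronunciation_ids(["你", "好"])  # [5, 6]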
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('''你好''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''你是谁''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                string_sequence = '''你好,你是谁'''
                tokens = tokenizer.tokenize(string_sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )
                self.assertEqual(input_dict , prepared_input_dict )
| 29 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/pegasus-xsum''': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list )}, but is"
                    f" {type(additional_special_tokens )}" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                '''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
                f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
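    # Hedged example (added): unlike BERT-style tokenizers, Pegasus only appends
    # EOS, so build_inputs_with_special_tokens([5, 6]) -> [5, 6, eos_token_id],
    # and a pair simply concatenates both sequences before the single EOS.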
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,) | 711 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
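    # Example (added; the key is hypothetical and shown only to illustrate the
    # direction of the rename): with the mapping above, an old checkpoint key
    # such as "model.decoder.layers.0.ngram_self_attn" is walked, attribute by
    # attribute, to the new "prophetnet.decoder.layers.0.self_attn" module.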
    for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'{attribute} is initialized' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                # These comparisons were previously bare expressions (no-ops);
                # assert them so shape mismatches actually fail.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path) | 307 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
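    # Shape note (added): with look_back=10 and forward_days=5, every sample is
    # a (10, 1) window of scaled prices and every target a 5-value vector, so
    # x_train is (n_samples, 10, 1) and y_train is (n_samples, 5) -- exactly
    # what the LSTM stack built below expects.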
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 59 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0 )
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
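    # Note (added): the inpaint UNet is built with "in_channels": 9 above, which
    # (as in other latent inpainting pipelines) should correspond to 4 latent
    # channels + 4 masked-image latent channels + 1 mask channel.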
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
| 199 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("""test""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""" )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""" )
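# Usage sketch (added): `accelerate test` therefore shells out to roughly
#   accelerate-launch [--config_file=<path>] .../test_utils/scripts/test_script.py
# and reports success exactly when that launched script exits with code 0.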
def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main() | 436 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """simple docstring"""
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
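# Minimal sketch (added): two equal and opposite forces applied at the same
# point produce no net moment, so this should hold:
#   assert in_static_equilibrium(
#       array([polar_force(10.0, 0), polar_force(10.0, 180)]),
#       array([[0, 0], [0, 0]]),
#   )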
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod() | 436 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    '''simple docstring'''
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 1_28
    elif "large" in model_name:
        embed_dim = 1_92
    elif "xlarge" in model_name:
        embed_dim = 2_56
    elif "huge" in model_name:
        embed_dim = 3_52
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
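# Worked example (added): by the branches above, get_focalnet_config("focalnet-tiny")
# yields embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
# focal_windows=[3, 3, 3, 3] and the ImageNet-1k label mapping.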
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=2_24 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(2_56 ),
            transforms.CenterCrop(2_24 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 484 |
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """simple docstring"""
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
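# Quick sketch (added): the longest side must be shorter than the sum of the
# rest, so
#   check_polygon([3, 4, 5])  # True  -- a valid triangle
#   check_polygon([1, 1, 3])  # False -- 3 is not < 1 + 1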
if __name__ == "__main__":
import doctest
doctest.testmod()
| 129 | 0 |
'''simple docstring'''
def bfs(graph, s, t, parent):
    """simple docstring"""
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    """simple docstring"""
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
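# Note (added): for the classic 6-node example graph below, the algorithm keeps
# finding augmenting paths until none remain; the expected maximum flow from
# node 0 to node 5 is 23.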
graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 712 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U] ):
    def __init__( self , key , val ) -> None:
        """simple docstring"""
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
    def __repr__( self ) -> str:
        """simple docstring"""
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )
class DoubleLinkedList(Generic[T, U] ):
    def __init__( self ) -> None:
        """simple docstring"""
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
        self.head.next , self.rear.prev = self.rear, self.head
    def __repr__( self ) -> str:
        """simple docstring"""
        rep = ['''DoubleLinkedList''']
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n    ".join(rep )
    def add( self , node ) -> None:
        """simple docstring"""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self , node ) -> DoubleLinkedListNode[T, U] | None:
        """simple docstring"""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class lowerCamelCase (Generic[T, U] ):
_lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
_snake_case : Union[str, Any] = capacity
_snake_case : int = 0
_snake_case : Dict = 0
_snake_case : Union[str, Any] = 0
_snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , lowercase__ ) -> bool:
"""simple docstring"""
return key in self.cache
def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
_snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
_snake_case : Tuple = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase__ )
return node.val
self.miss += 1
return None
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase__ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case : Optional[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case : Optional[Any] = value
self.list.add(lowercase__ )
@classmethod
def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case : Optional[Any] = LRUCache(lowercase__ )
_snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case : Tuple = func(*lowercase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
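# Illustrative use of the decorator (an added example; `fib` is a hypothetical
# function defined here for demonstration, not part of the original snippet):
if __name__ == "__main__":

    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        # Naive recursion becomes linear once results are memoised by the cache.
        return num if num < 2 else fib(num - 1) + fib(num - 2)

    fib(50)
    # `cache_info` is attached to the wrapper by the decorator above.
    print(fib.cache_info())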
| 47 | 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 27 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
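# Illustrative call (added for demonstration; assumes the deps table maps
# package names to pinned requirement strings such as "tqdm>=4.27"):
#
#     dep_version_check("tqdm")  # raises if the installed tqdm is too old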
| 27 | 1 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an audio byte payload to a mono float32 waveform via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone bytes from ffmpeg in chunks of `chunk_length_s` seconds."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream overlapping microphone chunks as numpy arrays with stride metadata."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Read raw bytes from `iterator` and yield chunks of `chunk_len` with overlap."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run ffmpeg and yield its stdout in reads of `buflen` bytes."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
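# Illustrative usage (added example; assumes a working `ffmpeg` binary on PATH
# and a default microphone; the 16 kHz rate is a common choice for speech
# models, assumed here for demonstration):
if __name__ == "__main__":
    for item in ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
        print(item["raw"].shape, item["stride"], item.get("partial"))
        if not item.get("partial", True):
            break  # stop after the first complete 5-second window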
| 720 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort a list in ascending order using the gnome sort algorithm."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
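# Added self-checks (not in the original snippet): gnome sort is O(n^2) in the
# worst case and O(n) on already-sorted input.
assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []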
| 460 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
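# Illustrative usage (added example; follows the standard transformers config
# pattern, instantiating with defaults and serialising to config.json):
#
#     config = MgpstrConfig()
#     assert config.max_token_length == 27
#     config.save_pretrained("./mgp-str-config")  # hypothetical output directory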
| 693 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
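# Added worked example: with the default num_block_repeats [1, 2, 2, 3, 3, 4, 1],
# sum(num_block_repeats) * 4 = 16 * 4 = 64, so EfficientNetConfig() reports
# num_hidden_layers == 64.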
| 386 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 702 | '''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
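# For reference (an illustrative assumption about the example scripts' output):
# each {split}_results.json is a flat metric mapping such as
#
#     {"eval_accuracy": 0.81, "eval_loss": 0.43}
#
# which is why the tests below index the returned dict by metric name.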
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
"""simple docstring"""
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_flax_glue.main()
lowercase_ = get_results(lowercase_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_clm_flax.main()
lowercase_ = get_results(lowercase_ )
self.assertLess(result["""eval_perplexity"""] , 100 )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_summarization_flax.main()
lowercase_ = get_results(lowercase_ , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_mlm_flax.main()
lowercase_ = get_results(lowercase_ )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_ta_mlm_flax.main()
lowercase_ = get_results(lowercase_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.4_2 )
@slow
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = 7 if get_gpu_count() > 1 else 2
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_flax_ner.main()
lowercase_ = get_results(lowercase_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir()
lowercase_ = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(lowercase_ , """argv""" , lowercase_ ):
run_qa.main()
lowercase_ = get_results(lowercase_ )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 603 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
def __init__( self : List[Any] , **lowerCAmelCase : Tuple ):
super().__init__(**lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : List[str] , lowerCAmelCase : Union[str, List[str], "Image", List["Image"]] , **lowerCAmelCase : Optional[Any] ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def __lowercase ( self : List[Any] , **lowerCAmelCase : str ):
lowerCAmelCase = {}
if "candidate_labels" in kwargs:
lowerCAmelCase = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowerCAmelCase = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def __lowercase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]="This is a photo of {}." ):
lowerCAmelCase = load_image(lowerCAmelCase )
lowerCAmelCase = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase = candidate_labels
lowerCAmelCase = [hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels]
lowerCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=self.framework , padding=lowerCAmelCase )
lowerCAmelCase = [text_inputs]
return inputs
def __lowercase ( self : str , lowerCAmelCase : Optional[int] ):
lowerCAmelCase = model_inputs.pop("""candidate_labels""" )
lowerCAmelCase = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , lowerCAmelCase ):
lowerCAmelCase = text_inputs[0]
else:
# Batching case.
lowerCAmelCase = text_inputs[0][0]
lowerCAmelCase = self.model(**lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def __lowercase ( self : int , lowerCAmelCase : Optional[Any] ):
lowerCAmelCase = model_outputs.pop("""candidate_labels""" )
lowerCAmelCase = model_outputs["""logits"""][0]
if self.framework == "pt":
lowerCAmelCase = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase = probs.tolist()
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase = [scores]
elif self.framework == "tf":
lowerCAmelCase = stable_softmax(lowerCAmelCase , axis=-1 )
lowerCAmelCase = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase , lowerCAmelCase ) , key=lambda lowerCAmelCase : -x[0] )
]
return result
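# Illustrative use of this pipeline (added example; the checkpoint name is the
# usual public CLIP model, assumed here for demonstration):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="a photo of a {}.")
#     # -> e.g. [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]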
| 169 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : str=[10, 20, 30, 40] , lowerCAmelCase : Any=[2, 2, 3, 2] , lowerCAmelCase : Any=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : int="gelu" , lowerCAmelCase : List[str]=10 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Tuple=["stage2", "stage3", "stage4"] , lowerCAmelCase : str=[2, 3, 4] , lowerCAmelCase : Union[str, Any]=None , ):
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = num_stages
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = num_labels
lowerCAmelCase = initializer_range
lowerCAmelCase = out_features
lowerCAmelCase = out_indices
lowerCAmelCase = scope
def __lowercase ( self : str ):
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Dict ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
lowerCAmelCase = ConvNextVaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str ):
lowerCAmelCase = ConvNextVaForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str ):
lowerCAmelCase = ConvNextVaBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase = None
lowerCAmelCase = ConvNextVaBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCAmelCase = model(lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _a , _a , unittest.TestCase ):
_a = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_a = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def __lowercase ( self : Any ):
lowerCAmelCase = ConvNextVaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def __lowercase ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : Dict ):
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __lowercase ( self : Dict ):
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __lowercase ( self : Dict ):
pass
def __lowercase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase = True
if model_class.__name__ in [
*get_values(lowerCAmelCase ),
*get_values(lowerCAmelCase ),
]:
continue
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
lowerCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
lowerCAmelCase = model(**lowerCAmelCase ).loss
loss.backward()
def __lowercase ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase = False
lowerCAmelCase = True
if (
model_class.__name__
in [*get_values(lowerCAmelCase ), *get_values(lowerCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
lowerCAmelCase = model(**lowerCAmelCase ).loss
loss.backward()
def __lowercase ( self : Dict ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(lowerCAmelCase )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def __lowercase ( self : str ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
lowerCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : int ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def __lowercase ( self : Any ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ConvNextVaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase () -> List[str]:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : int ):
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __lowercase ( self : int ):
lowerCAmelCase = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowerCAmelCase )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = preprocessor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**lowerCAmelCase )
# verify the logits
lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([0.9996, 0.1966, -0.4386] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 169 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
_SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = (3, 32, 1_28)
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase__ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
lowerCAmelCase__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , _UpperCamelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase__ ( self , **_UpperCamelCase ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase__ ( self , **_UpperCamelCase ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
lowerCAmelCase__ = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase__ = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
lowerCAmelCase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(_UpperCamelCase , return_tensors='np' )
lowerCAmelCase__ = processor(images=_UpperCamelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = """test"""
lowerCAmelCase__ = processor(text=_UpperCamelCase )
lowerCAmelCase__ = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = """test"""
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.char_decode(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.batch_decode(_UpperCamelCase )
lowerCAmelCase__ = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = torch.randn(1 , 27 , 38 )
lowerCAmelCase__ = torch.randn(1 , 27 , 5_02_57 )
lowerCAmelCase__ = torch.randn(1 , 27 , 3_05_22 )
lowerCAmelCase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] ) | 715 |
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph represented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Explore the graph from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the source-to-target path as 'A->B->...'; raise if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # expected to raise ValueError: no path to "Foo"
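# Added note: shortest_path returns the chain of vertices joined by "->",
# e.g. "G->C->A->B->D" for target "D" with source "G"; the "Foo" lookup above
# raises ValueError because no path to an unknown vertex exists.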
print(g.shortest_path("""Foo"""))
| 365 | 0 |
def odd_even_sort(input_list: list) -> list:
    """Sort the list in ascending order using odd-even transposition sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
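# Added self-check (not in the original snippet): odd-even transposition sort is
# a parallel-friendly variant of bubble sort; each pass costs O(n) and at most
# n passes are needed, for O(n^2) sequential time.
assert odd_even_sort([3, 2, 1]) == [1, 2, 3]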
| 27 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14 | 0 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams if they are made up of the same characters."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
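# Added examples (not in the original snippet): the defaultdict-based check runs
# in O(n) time and O(k) space for an alphabet of size k.
assert check_anagrams("silent", "listen") is True
assert check_anagrams("there", "their") is False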
| 701 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using secrets.choice."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you better save it.]")


if __name__ == "__main__":
    main()
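# Added note (worked arithmetic, for illustration): the alphabet above has
# 26 + 26 + 10 + 32 = 94 characters, so a uniformly drawn 8-character password
# carries 8 * log2(94), roughly 52.4 bits of entropy; secrets.choice keeps the
# draw cryptographically secure, unlike random.choice.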
| 207 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # note: intentionally shadows the builtin, mirroring the benchmark's structure
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 287 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}
    If you're using multi-dimensional lists, then set the config as follows :
    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse} | 287 | 1 |
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Check, via collections.Counter, that at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Check the same property with an explicit frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """Time both implementations on the given string."""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""") | 182 |
| 182 | 1 |
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->bool:
"""simple docstring"""
__lowercase : List[Any] = 0
for ch in input_str:
__lowercase : Tuple = ord(_lowerCamelCase )
__lowercase : str = pow(2, _lowerCamelCase )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 575 |
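To make the bit manipulation concrete, here is a short trace for the string "aba" (illustrative only, using the function name introduced above):

# ord('a') == 97, ord('b') == 98
# after 'a':  bitmap == 1 << 97
# after 'b':  bitmap == (1 << 97) | (1 << 98)
# second 'a': bitmap >> 97 & 1 == 1, so the function returns False
print(has_no_repeated_characters("aba"))  # False
print(has_no_repeated_characters("abc"))  # True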
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 575 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 719 |
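Downstream, dumps like this are typically turned into masking probabilities by raising the counts to a negative smoothing power and normalizing (as in XLM/word2vec). The exponent below is an assumed illustration, not a value taken from this script:

import numpy as np

counts = np.array([0, 10, 100, 1000], dtype=np.float64)  # toy per-token counts

smoothing = 0.7  # assumed smoothing factor
probs = np.maximum(counts, 1) ** -smoothing
probs /= probs.sum()
print(probs)  # rarer tokens receive a larger masking probability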
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3],):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='BEiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8_192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21_841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2_396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device, )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape) | 224 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 476 |
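The `_LazyModule` registered above defers the heavy torch imports until an attribute is first accessed. A stripped-down illustration of the same idea (a sketch, not transformers' actual implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    # Resolve attributes from submodules on first access instead of at import time.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)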
'''simple docstring'''
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    '''Expected number of distinct colours among num_picked balls drawn from the urn.'''
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''


if __name__ == "__main__":
    print(solution(20))
| 432 | 0 |
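The script computes a textbook linearity-of-expectation result: with 7 colours of 10 balls each, a given colour is absent from a 20-ball draw with probability C(60, 20)/C(70, 20), so the expected number of distinct colours is 7 * (1 - C(60, 20)/C(70, 20)). A standalone check:

import math

total = math.comb(70, 20)
missing_one_colour = math.comb(60, 20)
print(f"{7 * (1 - missing_one_colour / total):.9f}")  # same value as solution(20)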
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'], )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        expected_encoding = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences, )
| 561 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 561 | 1 |
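For reference, the parser built above backs the `accelerate test` subcommand; it can also be exercised directly in Python (a hypothetical session with a placeholder config path):

parser = test_command_parser()
args = parser.parse_args(["--config_file", "default_config.yaml"])
print(args.config_file)  # default_config.yaml
# test_command(args) would then launch the bundled test script via accelerate-launch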
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """simple docstring"""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P, goal):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P, goal):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_1(P, goal):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start, i, goal, g_function):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,)
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,)
                        close_list_anchor.append(get_s)
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(snake_case__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 675 |
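For orientation, the search above implements shared multi-heuristic A*: each queue \(i\) orders states by an inflated key, and an inadmissible queue is expanded only while its best key stays within a factor \(W_2\) of the anchor queue's. In formula form (a summary of what `key` and the main loop compute; here \(W_1 = W_2 = 1\)):

\[
\mathrm{key}(s, i) = g(s) + W_1\, h_i(s, \mathrm{goal}),
\qquad
\text{expand queue } i \iff \mathrm{minkey}_i \le W_2 \cdot \mathrm{minkey}_0 .
\]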
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def A_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
_UpperCamelCase :Optional[int] = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase :Optional[int] = name.split('''.''' )
_UpperCamelCase :Optional[Any] = int(items[0] )
_UpperCamelCase :List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_UpperCamelCase :Optional[int] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_UpperCamelCase :Optional[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_UpperCamelCase :int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_UpperCamelCase :Dict = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
def A_ ( snake_case__ , snake_case__ ) -> List[str]:
_UpperCamelCase :str = SEWConfig()
if is_finetuned:
_UpperCamelCase :Optional[int] = model.wav_encoder.wav_model.cfg
else:
_UpperCamelCase :Dict = model.cfg
_UpperCamelCase :Dict = fs_config.conv_bias
_UpperCamelCase :int = eval(fs_config.conv_feature_layers )
_UpperCamelCase :List[Any] = [x[0] for x in conv_layers]
_UpperCamelCase :Optional[int] = [x[1] for x in conv_layers]
_UpperCamelCase :Optional[int] = [x[2] for x in conv_layers]
_UpperCamelCase :str = '''gelu'''
_UpperCamelCase :Optional[int] = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
_UpperCamelCase :List[Any] = 0.0
_UpperCamelCase :Optional[int] = fs_config.activation_fn.name
_UpperCamelCase :str = fs_config.encoder_embed_dim
_UpperCamelCase :Dict = 0.02
_UpperCamelCase :Optional[int] = fs_config.encoder_ffn_embed_dim
_UpperCamelCase :str = 1E-5
_UpperCamelCase :int = fs_config.encoder_layerdrop
_UpperCamelCase :Union[str, Any] = fs_config.encoder_attention_heads
_UpperCamelCase :List[str] = fs_config.conv_pos_groups
_UpperCamelCase :List[Any] = fs_config.conv_pos
_UpperCamelCase :List[str] = len(snake_case__ )
_UpperCamelCase :Optional[int] = fs_config.encoder_layers
_UpperCamelCase :Optional[int] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCamelCase :List[Any] = model.cfg
_UpperCamelCase :List[Any] = fs_config.final_dropout
_UpperCamelCase :Dict = fs_config.layerdrop
_UpperCamelCase :Any = fs_config.activation_dropout
_UpperCamelCase :List[str] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCamelCase :Optional[Any] = fs_config.attention_dropout
_UpperCamelCase :List[Any] = fs_config.dropout_input
_UpperCamelCase :Dict = fs_config.dropout
_UpperCamelCase :int = fs_config.mask_channel_length
_UpperCamelCase :Tuple = fs_config.mask_channel_prob
_UpperCamelCase :int = fs_config.mask_length
_UpperCamelCase :Dict = fs_config.mask_prob
_UpperCamelCase :List[Any] = '''Wav2Vec2FeatureExtractor'''
_UpperCamelCase :Optional[Any] = '''Wav2Vec2CTCTokenizer'''
return config
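# Note on convert_config above: fairseq stores `conv_feature_layers` as a string
# (e.g. "[(64, 10, 5)] + [(128, 3, 2)] * 4"), which is why it is parsed with
# eval(); each resulting (dim, kernel, stride) tuple feeds the conv_dim /
# conv_kernel / conv_stride lists of the SEW config.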
@torch.no_grad()
def A_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True ) -> int:
if is_finetuned:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase :List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase :str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase :Any = SEWConfig.from_pretrained(snake_case__ )
else:
_UpperCamelCase :List[str] = convert_config(model[0] , snake_case__ )
_UpperCamelCase :List[str] = model[0].eval()
_UpperCamelCase :Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
_UpperCamelCase :str = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
if is_finetuned:
if dict_path:
_UpperCamelCase :int = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase :List[Any] = target_dict.pad_index
_UpperCamelCase :str = target_dict.bos_index
_UpperCamelCase :int = target_dict.pad_index
_UpperCamelCase :Dict = target_dict.bos_index
_UpperCamelCase :int = target_dict.eos_index
_UpperCamelCase :str = len(target_dict.symbols )
_UpperCamelCase :Optional[int] = os.path.join(snake_case__ , '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , snake_case__ )
_UpperCamelCase :int = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case__ , )
_UpperCamelCase :Dict = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
_UpperCamelCase :Tuple = SEWForCTC(snake_case__ )
else:
_UpperCamelCase :str = SEWModel(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
recursively_load_weights(snake_case__ , snake_case__ , snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase__ :List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase__ :Tuple = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
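# Example invocation (all paths below are hypothetical, for illustration only):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned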
| 355 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """spiece.model"""}
lowerCamelCase_ = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
lowerCamelCase_ = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
class UpperCamelCase_ (__UpperCAmelCase ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = []
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str="<unk>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : List[Any]="<pad>" , lowerCAmelCase_ : List[Any]="[SEP]" , lowerCAmelCase_ : Dict="[MASK]" , lowerCAmelCase_ : List[str]="[CLS]" , lowerCAmelCase_ : str = None , **lowerCAmelCase_ : List[Any] , ) -> List[Any]:
UpperCAmelCase_ : int = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
UpperCAmelCase_ : str = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase_ : int = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
UpperCAmelCase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
UpperCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.__dict__.copy()
UpperCAmelCase_ : Dict = None
return state
def __setstate__( self : Any , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Dict:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int ) -> Optional[Any]:
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> Dict:
        token = self.sp_model.IdToPiece(lowerCAmelCase_ )
        return token
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] ) -> Any:
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = ""
UpperCAmelCase_ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : int = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] = False , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : int = True , **lowerCAmelCase_ : Optional[int] , ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = kwargs.pop("use_source_tokenizer" , lowerCAmelCase_ )
UpperCAmelCase_ : Any = self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
UpperCAmelCase_ : Tuple = []
sub_texts.append(lowerCAmelCase_ )
else:
current_sub_text.append(lowerCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCAmelCase_ : Any = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(lowerCAmelCase_ ) )
else:
UpperCAmelCase_ : int = "".join(lowerCAmelCase_ )
UpperCAmelCase_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ : Optional[Any] = self.clean_up_tokenization(lowerCAmelCase_ )
return clean_text
else:
return text
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] = None ) -> List[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : str = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , "wb" ) as fi:
UpperCAmelCase_ : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] = None ) -> List[str]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = None , lowerCAmelCase_ : List[Any] = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int = None ) -> Tuple:
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
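# Layout implied by the two methods above: a single sequence is encoded as
# [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP]; token_type_ids are 0 for
# the first segment (including [CLS] and the first [SEP]) and 1 for the second
# segment (including its trailing [SEP]).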
| 717 |
"""simple docstring"""
class UpperCamelCase_ :
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=None ) -> int:
UpperCAmelCase_ : int = data
UpperCAmelCase_ : Optional[int] = previous
UpperCAmelCase_ : int = next_node
def __str__( self : Dict ) -> str:
return f"""{self.data}"""
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.data
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return self.next
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return self.previous
class UpperCamelCase_ :
def __init__( self : Any , lowerCAmelCase_ : str ) -> List[str]:
        self.current = head
def __iter__( self : Any ) -> List[str]:
return self
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
if not self.current:
raise StopIteration
else:
            value = self.current.get_data()
            self.current = self.current.get_next()
return value
class UpperCamelCase_ :
def __init__( self : List[str] ) -> Tuple:
UpperCAmelCase_ : Tuple = None # First node in list
UpperCAmelCase_ : Union[str, Any] = None # Last node in list
def __str__( self : List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[str] = self.head
UpperCAmelCase_ : int = []
while current is not None:
nodes.append(current.get_data() )
UpperCAmelCase_ : Optional[Any] = current.get_next()
return " ".join(str(lowerCAmelCase_ ) for node in nodes )
def __contains__( self : int , lowerCAmelCase_ : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase_ : Union[str, Any] = current.get_next()
return False
def __iter__( self : int ) -> Tuple:
return LinkedListIterator(self.head )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> None:
if self.head is None:
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : Union[str, Any] = node
else:
self.insert_before_node(self.head , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Node ) -> None:
if self.head is None:
self.set_head(lowerCAmelCase_ )
else:
self.insert_after_node(self.tail , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : Optional[Any] = Node(lowerCAmelCase_ )
if self.head is None:
self.set_head(lowerCAmelCase_ )
else:
self.set_tail(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> None:
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Tuple = node.previous
if node.get_previous() is None:
UpperCAmelCase_ : List[Any] = node_to_insert
else:
UpperCAmelCase_ : Dict = node_to_insert
UpperCAmelCase_ : Dict = node_to_insert
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> None:
UpperCAmelCase_ : Dict = node
UpperCAmelCase_ : int = node.next
if node.get_next() is None:
UpperCAmelCase_ : int = node_to_insert
else:
UpperCAmelCase_ : Optional[Any] = node_to_insert
UpperCAmelCase_ : Any = node_to_insert
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : List[str] = Node(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCAmelCase_ , lowerCAmelCase_ )
return
current_position += 1
UpperCAmelCase_ : Optional[Any] = node.next
self.insert_after_node(self.tail , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : int ) -> Node:
UpperCAmelCase_ : List[Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase_ : Optional[int] = node.get_next()
raise Exception("Node not found" )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
if (node := self.get_node(lowerCAmelCase_ )) is not None:
if node == self.head:
UpperCAmelCase_ : Tuple = self.head.get_next()
if node == self.tail:
UpperCAmelCase_ : Optional[int] = self.tail.get_previous()
self.remove_node_pointers(lowerCAmelCase_ )
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Node ) -> None:
if node.get_next():
UpperCAmelCase_ : int = node.previous
if node.get_previous():
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Dict = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return self.head is None
def snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
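# Usage sketch (hedged: class and method names in this snippet are obfuscated,
# so LinkedList / insert below are assumed deobfuscations, and several mangled
# `self.`-attribute assignments mean the snippet is not runnable as-is):
#   dll = LinkedList()
#   for v in (1, 2, 3):
#       dll.insert(v)   # appends via set_tail / insert_after_node
#   str(dll)            # -> "1 2 3"
#   2 in dll            # -> True, via __contains__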
| 463 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
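# Note: this is the standard lazy-import pattern. At import time only
# `_import_structure` (submodule -> exported names) is built; `_LazyModule`
# defers the real submodule imports until an attribute is first accessed, so
# optional backends (tokenizers, torch, TF) are only needed when actually used.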
| 180 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_UpperCamelCase = datasets.utils.logging.get_logger(__name__)
_UpperCamelCase = ['''names''', '''prefix''']
_UpperCamelCase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_UpperCamelCase = ['''encoding_errors''', '''on_bad_lines''']
_UpperCamelCase = ['''date_format''']
@dataclass
class _lowerCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase_ : str =","
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[Union[int, List[int], str]] ="infer"
UpperCAmelCase_ : Optional[List[str]] =None
UpperCAmelCase_ : Optional[List[str]] =None
UpperCAmelCase_ : Optional[Union[int, str, List[int], List[str]]] =None
UpperCAmelCase_ : Optional[Union[List[int], List[str]]] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : Optional[Literal["c", "python", "pyarrow"]] =None
UpperCAmelCase_ : Dict[Union[int, str], Callable[[Any], Any]] =None
UpperCAmelCase_ : Optional[list] =None
UpperCAmelCase_ : Optional[list] =None
UpperCAmelCase_ : bool =False
UpperCAmelCase_ : Optional[Union[int, List[int]]] =None
UpperCAmelCase_ : Optional[int] =None
UpperCAmelCase_ : Optional[Union[str, List[str]]] =None
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =False
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : str ="."
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : str ='"'
UpperCAmelCase_ : int =0
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : int =0
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =False
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : int =10_000
UpperCAmelCase_ : Optional[datasets.Features] =None
UpperCAmelCase_ : Optional[str] ="strict"
UpperCAmelCase_ : Literal["error", "warn", "skip"] ="error"
UpperCAmelCase_ : Optional[str] =None
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
if self.delimiter is not None:
__snake_case : List[str] = self.delimiter
if self.column_names is not None:
__snake_case : Optional[Any] = self.column_names
@property
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case : Any = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value;
        # others are deprecated, so we also skip them when they equal the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase_ : Dict =CsvConfig
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase , (str, list, tuple) ):
__snake_case : int = data_files
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : Any = [files]
__snake_case : Optional[int] = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
__snake_case : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : str = [files]
__snake_case : int = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase , gen_kwargs={"files": files} ) )
return splits
def UpperCAmelCase ( self , UpperCAmelCase ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
__snake_case : str = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
__snake_case : Optional[int] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=UpperCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__snake_case : List[str] = table_cast(UpperCAmelCase , UpperCAmelCase )
return pa_table
def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__snake_case : Union[str, Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase ) ):
__snake_case : Tuple = pd.read_csv(UpperCAmelCase , iterator=UpperCAmelCase , dtype=UpperCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCAmelCase ):
__snake_case : List[str] = pa.Table.from_pandas(UpperCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase )}: {e}""" )
raise
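# Usage sketch (hedged; only the public datasets API is assumed):
#   import datasets
#   ds = datasets.load_dataset("csv", data_files={"train": "data.csv"}, sep=";")
# Extra keyword arguments flow into the CsvConfig above and from there into
# pandas.read_csv through the pd_read_csv_kwargs property.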
| 243 | 0 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0.0 , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = "layer_norm" , __UpperCAmelCase = False , ) -> Optional[Any]:
super().__init__()
_a = only_cross_attention
_a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
_a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_a = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
_a = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase )
else:
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
_a = Attention(
query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
_a = (
AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
)
_a = Attention(
query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
_a = None
_a = None
# 3. Feed-forward
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
_a = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase )
# let chunk size default to None
_a = None
_a = 0
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
# Sets chunk feed-forward
_a = chunk_size
_a = dim
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ) -> Tuple:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
_a = self.norma(__UpperCAmelCase , __UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
_a , _a , _a , _a , _a = self.norma(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype )
else:
_a = self.norma(__UpperCAmelCase )
_a = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_a = self.attna(
__UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
if self.use_ada_layer_norm_zero:
_a = gate_msa.unsqueeze(1 ) * attn_output
_a = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_a = (
self.norma(__UpperCAmelCase , __UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase )
)
_a = self.attna(
__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
_a = attn_output + hidden_states
# 3. Feed-forward
_a = self.norma(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
_a = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
_a = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_a = torch.cat(
[self.ff(__UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_a = self.ff(__UpperCAmelCase )
if self.use_ada_layer_norm_zero:
_a = gate_mlp.unsqueeze(1 ) * ff_output
_a = ff_output + hidden_states
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "geglu" , __UpperCAmelCase = False , ) -> List[Any]:
super().__init__()
_a = int(dim * mult )
_a = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_a = GELU(__UpperCAmelCase , __UpperCAmelCase )
if activation_fn == "gelu-approximate":
_a = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate='''tanh''' )
elif activation_fn == "geglu":
_a = GEGLU(__UpperCAmelCase , __UpperCAmelCase )
elif activation_fn == "geglu-approximate":
_a = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase )
_a = nn.ModuleList([] )
# project in
self.net.append(__UpperCAmelCase )
# project dropout
self.net.append(nn.Dropout(__UpperCAmelCase ) )
# project out
self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
for module in self.net:
_a = module(__UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "none" ) -> Union[str, Any]:
super().__init__()
_a = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
_a = approximate
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = self.proj(__UpperCAmelCase )
_a = self.gelu(__UpperCAmelCase )
return hidden_states
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
super().__init__()
_a = nn.Linear(__UpperCAmelCase , dim_out * 2 )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if gate.device.type != "mps":
return F.gelu(__UpperCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
_a , _a = self.proj(__UpperCAmelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__UpperCAmelCase )
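# Shape sanity sketch for the GEGLU block above (hypothetical sizes; the class
# names in this snippet are obfuscated, GEGLU is assumed):
#   geglu = GEGLU(64, 128)                 # proj is Linear(64, 256), then chunk(2)
#   out = geglu(torch.randn(2, 10, 64))    # -> torch.Size([2, 10, 128])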
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
super().__init__()
_a = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str:
_a = self.proj(__UpperCAmelCase )
return x * torch.sigmoid(1.702 * x )
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
super().__init__()
_a = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase )
_a = nn.SiLU()
_a = nn.Linear(__UpperCAmelCase , embedding_dim * 2 )
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = self.linear(self.silu(self.emb(__UpperCAmelCase ) ) )
_a , _a = torch.chunk(__UpperCAmelCase , 2 )
_a = self.norm(__UpperCAmelCase ) * (1 + scale) + shift
return x
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
super().__init__()
_a = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase )
_a = nn.SiLU()
_a = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase )
_a = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1e-6 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
_a = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase ) ) )
_a , _a , _a , _a , _a , _a = emb.chunk(6 , dim=1 )
_a = self.norm(__UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 1e-5 ) -> Any:
super().__init__()
_a = num_groups
_a = eps
if act_fn is None:
_a = None
else:
_a = get_activation(__UpperCAmelCase )
_a = nn.Linear(__UpperCAmelCase , out_dim * 2 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
if self.act:
_a = self.act(__UpperCAmelCase )
_a = self.linear(__UpperCAmelCase )
_a = emb[:, :, None, None]
_a , _a = emb.chunk(2 , dim=1 )
_a = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps )
_a = x * (1 + scale) + shift
        return x
| 285 |
"""simple docstring"""
def solution( min_total : int = 10**12 ) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
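# Math note (inferred from the recurrence): every (numerator, denominator) pair
# produced here satisfies numerator**2 - 2 * denominator**2 == -1, e.g. (7, 5),
# (41, 29), (239, 169). With t = (numerator + 1) / 2 total discs and
# b = (denominator + 1) / 2 blue discs, b * (b - 1) / (t * (t - 1)) == 1 / 2
# exactly, so the loop returns the blue-disc count for the first arrangement
# with more than min_total discs (the classic Pell-equation approach to
# Project Euler problem 100).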
if __name__ == "__main__":
    print(f'{solution() = }')
| 285 | 1 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
lowerCAmelCase__ = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=A__ , large=A__ , share_emb=A__ , use_bert_emb=A__ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    __lowercase = torch.load(A__ , lambda storage , loc : storage )
__lowercase = AbsSummarizer(A__ , torch.device('''cpu''' ) , A__ )
original.eval()
__lowercase = BertAbsSummarizer(A__ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__lowercase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
__lowercase = torch.tensor(A__ ).unsqueeze(0 )
__lowercase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
__lowercase = torch.tensor(A__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowercase = encoder_input_ids
__lowercase = decoder_input_ids
__lowercase = __lowercase = None
__lowercase = None
__lowercase = __lowercase = None
__lowercase = __lowercase = None
__lowercase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowercase = original(A__ , A__ , A__ , A__ , A__ , A__ , A__ )[0]
__lowercase = original.generator(A__ )
__lowercase = new_model(
A__ , A__ , A__ , A__ , A__ )[0]
__lowercase = new_model.generator(A__ )
__lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(A__ ) )
__lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(A__ ) )
__lowercase = torch.allclose(A__ , A__ , atol=1e-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
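# Sanity-check strategy used above: both models run a single forward pass on the
# same padded inputs, the generator outputs must agree within
# torch.allclose(atol=1e-3), and only then is the converted state_dict saved.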
| 41 |
'''simple docstring'''
def __lowercase ( principal , rate_per_annum , years_to_repay ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(__lowercase , __lowercase ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
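# Worked example (illustrative values): __lowercase(100_000, 0.10, 2) uses a
# monthly rate of 0.10 / 12 over 24 payments, so
#   EMI = 100_000 * (0.10 / 12) * (1 + 0.10 / 12) ** 24 / ((1 + 0.10 / 12) ** 24 - 1)
#       ~= 4614.5 per month.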
| 330 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ = do_convert_rgb
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size=size["shortest_edge"] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="size" , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ = make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ = [convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
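# Usage sketch (hedged; the class name above is obfuscated, CLIPImageProcessor
# is assumed, and the image path is hypothetical):
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape   # -> torch.Size([1, 3, 224, 224])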
| 23 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class VideoMAEImageProcessor(BaseImageProcessor):
    r"""
    Video image processor: resizes, center-crops, rescales and normalizes every frame of a batch of videos.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
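# Illustrative usage sketch (the class name above is reconstructed, so treat it
# as an assumption; assumes numpy is installed). Each frame of a two-frame
# dummy video is resized/cropped to the 224x224 defaults set in __init__:
#
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(2)]
#   batch = processor.preprocess(video, return_tensors="np")
#   batch["pixel_values"].shape  # expected: (1, 2, 3, 224, 224)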
| 23 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (with the unknown one passed
    as 0), compute the missing quantity from P = V * I."""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
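# Illustrative calls (arbitrary values): passing 0 for one argument recovers
# it from the other two via P = V * I.
#   electric_power(voltage=0, current=2, power=5)   # -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=4, power=0)   # -> result(name='power', value=8.0)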
| 208 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
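# Illustrative usage sketch: with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the feature encoder downsamples the
# waveform by 5 * 2**6 = 320 (the product computed by the property above).
#   config = SEWConfig()
#   config.inputs_to_logits_ratio  # -> 320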
| 139 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation: a solution exists only when the coin count equals the node count.
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
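# Illustrative example: a root holding 3 coins with two empty children needs
# two moves (one coin pushed down to each child).
#   root = TreeNode(3, TreeNode(0), TreeNode(0))
#   distribute_coins(root)  # -> 2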
| 139 | 1 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
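# Illustrative behavior (hypothetical input): the YAML block between the two
# "---" markers is returned separately from the remaining Markdown body.
#   _split_yaml_from_readme("---\nlanguage: en\n---\n# My dataset")
#   # -> ("language: en", "# My dataset")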
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 76 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
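# Illustrative invocation (hypothetical local paths; the script filename is an
# assumption):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visual_bert_vqa_pretrained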
| 47 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 711 | '''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest ready job is picked next)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to the burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # While processes remain, every process whose arrival time has passed and
    # that still has remaining execution time is put into ready_process; the
    # shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
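# Illustrative trace with staggered arrivals: arrival=[0, 1], burst=[4, 1].
# At t=0 only process 0 is ready, so it runs to completion (t=4); the shorter
# process 1 then waits 5 - 1 - 1 = 3 units. Because each selected process runs
# to completion, this behaves as non-preemptive shortest-job-first.
#   calculate_waitingtime([0, 1], [4, 1], 2)  # -> [0, 3]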
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 54 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 460 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the counts histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 272 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)


def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes the rows of the fused QKV parameter so that later Megatron-LM
    # checkpoint layouts and HuggingFace GPT-2 agree on the ordering.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
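# Illustrative shape check (arbitrary small dimensions, assuming a >= 2.0
# checkpoint): the fused QKV tensor keeps its overall shape while its rows are
# regrouped from [heads, splits, head_size] order to [splits, heads, head_size].
#   w = torch.arange(2 * 3 * 4 * 8, dtype=torch.float32).view(2 * 3 * 4, 8)
#   out = fix_query_key_value_ordering(w, 2.0, 3, 2, 4)  # splits=3, heads=2, head size=4
#   assert out.shape == w.shape and not torch.equal(out, w)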
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 272 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 419 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
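# Note on the lazy-import pattern above: at import time only the name table is
# built; the module object in sys.modules is swapped for a _LazyModule that
# imports configuration_mra/modeling_mra on first attribute access, so torch is
# only pulled in when an MRA class is actually used.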
| 419 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , A__ , A__ ):
UpperCAmelCase_ :int = 32
UpperCAmelCase_ :int = 4
UpperCAmelCase_ :int = 4
UpperCAmelCase_ :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ :Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCAmelCase_ :Union[bool, Tuple[bool]] = False
UpperCAmelCase_ :Tuple[int] = (320, 640, 1280, 1280)
UpperCAmelCase_ :int = 2
UpperCAmelCase_ :Union[int, Tuple[int]] = 8
UpperCAmelCase_ :Optional[Union[int, Tuple[int]]] = None
UpperCAmelCase_ :int = 1280
UpperCAmelCase_ :float = 0.0
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :jnp.dtype = jnp.floataa
UpperCAmelCase_ :bool = True
UpperCAmelCase_ :int = 0
UpperCAmelCase_ :bool = False
def __lowerCAmelCase ( self , __A ) -> FrozenDict:
# init input tensors
lowerCAmelCase_ :Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase_ :Optional[int] = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ :List[str] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase_ :Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = jax.random.split(__A )
lowerCAmelCase_ :Dict = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__A , __A , __A , __A )["params"]
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.block_out_channels
lowerCAmelCase_ :Dict = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase_ :int = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase_ :Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase_ :Optional[int] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase_ :List[str] = FlaxTimestepEmbedding(__A , dtype=self.dtype )
lowerCAmelCase_ :Optional[Any] = self.only_cross_attention
if isinstance(__A , __A ):
lowerCAmelCase_ :Tuple = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__A , __A ):
lowerCAmelCase_ :str = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase_ :Optional[Any] = []
lowerCAmelCase_ :Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase_ :Optional[Any] = output_channel
lowerCAmelCase_ :int = block_out_channels[i]
lowerCAmelCase_ :List[Any] = i == len(__A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase_ :List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase_ :Optional[int] = FlaxDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__A )
lowerCAmelCase_ :Union[str, Any] = down_blocks
# mid
lowerCAmelCase_ :Any = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :Optional[int] = list(reversed(__A ) )
lowerCAmelCase_ :Tuple = list(reversed(__A ) )
lowerCAmelCase_ :Optional[Any] = list(reversed(__A ) )
lowerCAmelCase_ :List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCAmelCase_ :List[Any] = output_channel
lowerCAmelCase_ :Union[str, Any] = reversed_block_out_channels[i]
lowerCAmelCase_ :Any = reversed_block_out_channels[min(i + 1 , len(__A ) - 1 )]
lowerCAmelCase_ :List[str] = i == len(__A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCAmelCase_ :str = FlaxCrossAttnUpBlockaD(
in_channels=__A , out_channels=__A , prev_output_channel=__A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase_ :Dict = FlaxUpBlockaD(
in_channels=__A , out_channels=__A , prev_output_channel=__A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__A )
lowerCAmelCase_ :Dict = output_channel
lowerCAmelCase_ :Tuple = up_blocks
# out
lowerCAmelCase_ :Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowerCAmelCase_ :Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __A , __A , __A , __A=None , __A=None , __A = True , __A = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(__A , jnp.ndarray ):
lowerCAmelCase_ :Tuple = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ :Optional[int] = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase_ :Any = jnp.expand_dims(__A , 0 )
lowerCAmelCase_ :Any = self.time_proj(__A )
lowerCAmelCase_ :int = self.time_embedding(__A )
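# the scalar timestep is broadcast to a batch, lifted to sinusoidal features by
# time_proj, then mapped by the time_embedding MLP to the model's hidden width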
# 2. pre-process
lowerCAmelCase_ :Optional[Any] = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[Any] = self.conv_in(__A )
# 3. down
lowerCAmelCase_ :str = (sample,)
for down_block in self.down_blocks:
if isinstance(__A , __A ):
lowerCAmelCase_ , lowerCAmelCase_ :str = down_block(__A , __A , __A , deterministic=not train )
else:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = down_block(__A , __A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCAmelCase_ :List[Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
__A , __A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase_ :Tuple = new_down_block_res_samples
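# this loop is the hook for externally computed conditioning (controlnet-style
# adapters): each stored encoder activation receives its matching residual
# before the decoder consumes it as a skip connection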
# 4. mid
lowerCAmelCase_ :Optional[Any] = self.mid_block(__A , __A , __A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCAmelCase_ :Any = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCAmelCase_ :List[Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
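# the two slices pop the last (layers_per_block + 1) stored encoder activations
# as skip connections for this up block; the remainder stays queued for the
# remaining up blocks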
if isinstance(__A , __A ):
lowerCAmelCase_ :List[str] = up_block(
__A , temb=__A , encoder_hidden_states=__A , res_hidden_states_tuple=__A , deterministic=not train , )
else:
lowerCAmelCase_ :Any = up_block(__A , temb=__A , res_hidden_states_tuple=__A , deterministic=not train )
# 6. post-process
lowerCAmelCase_ :Optional[Any] = self.conv_norm_out(__A )
lowerCAmelCase_ :Optional[Any] = nn.silu(__A )
lowerCAmelCase_ :List[Any] = self.conv_out(__A )
lowerCAmelCase_ :Dict = jnp.transpose(__A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__A )
| 256 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=__A , text_to_image_strength=0.7_5 , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
lowerCAmelCase_ :Any = VersatileDiffusionPipeline.from_pretrained(__A , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :str = generator.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = pipe.dual_guided(
prompt="""first prompt""" , image=__A , text_to_image_strength=0.7_5 , generator=__A , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :str = """cyberpunk 2077"""
lowerCAmelCase_ :Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowerCAmelCase_ :List[str] = torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = pipe.dual_guided(
prompt=__A , image=__A , text_to_image_strength=0.7_5 , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
lowerCAmelCase_ :int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :List[str] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCAmelCase_ :List[str] = """A painting of a squirrel eating a burger """
lowerCAmelCase_ :Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase_ :Dict = pipe.text_to_image(
prompt=__A , generator=__A , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
lowerCAmelCase_ :List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :str = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCAmelCase_ :Any = pipe.image_variation(__A , generator=__A , output_type="""numpy""" ).images
lowerCAmelCase_ :Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Union[str, Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 256 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[str] = CodeGenTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE_: Any = True
SCREAMING_SNAKE_CASE_: str = {"""add_prefix_space""": True}
SCREAMING_SNAKE_CASE_: Any = False
def _UpperCAmelCase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
A__ = dict(zip(__a , range(len(__a ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
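        # with the toy merges above, " lower" byte-pair-encodes step by step:
        # "\u0120 l" -> "\u0120l", "\u0120l o" -> "\u0120lo", "\u0120lo w" -> "\u0120low",
        # and "e r" -> "er", so " lower" comes out as ["\u0120low", "er"]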
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def _UpperCAmelCase ( self , **__a ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
A__ = tokens + [tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = 'lower newer'
# Testing tokenization
A__ = tokenizer.tokenize(__a , add_prefix_space=__a )
A__ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
A__ = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
A__ = self.get_rust_tokenizer(add_prefix_space=__a )
A__ = tokenizer.encode(__a , add_prefix_space=__a )
A__ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
A__ = tokens + [rust_tokenizer.unk_token]
A__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def _UpperCAmelCase ( self , *__a , **__a ):
"""simple docstring"""
pass
def _UpperCAmelCase ( self , __a=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
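                # none of these tokenizers define a pad token, so every padded call below
                # must raise; the variant with pad_token='<pad>' is exercised in the next test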
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input looooooooong', 'This is a simple input']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
A__ = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
A__ = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = '$$$'
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = tokenizer.bos_token_id
A__ = tokenizer(__a )
A__ = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A__ = tokenizer.decode(out_s.input_ids )
A__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
A__ = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
A__ = '\nif len_a > len_b: result = a\nelse: result = b'
A__ = tokenizer.encode(__a )
A__ = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
A__ = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Tuple = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 260 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , )-> List[Any]:
"""simple docstring"""
if attention_mask is None:
UpperCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=UpperCAmelCase_ )
if decoder_head_mask is None:
UpperCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCAmelCase_ )
if cross_attn_head_mask is None:
UpperCamelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCAmelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
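# sketch of the default mask construction above, using made-up values:
# with pad_token_id=1 and input_ids=[[5, 7, 1, 1]], input_ids.ne(1) yields
# [[True, True, False, False]], restricting attention to the real tokens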
class __a :
def __init__( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : List[str]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[str]="relu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : str=20 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : int=0 , )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Any:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.eos_token_id # Eos Token
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # We need to clamp the input ids here to avoid pad tokens appearing mid-sequence.
        # For M2M100 the position_ids are prepared such that all pad tokens get
        # pos id = 2 while the rest fall in 2..seq_length, where seq_length is
        # computed as seq_length - num_pad_tokens. When using past key values there
        # is no way of knowing whether the past input ids contained pad tokens, which
        # yields an incorrect seq_length and, in turn, position_ids that are off by
        # num_pad_tokens for the past input.
UpperCamelCase = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
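        # clamp(pad_token_id + 1) raises every sampled id to at least pad_token_id + 1,
        # so no position in the random batch can accidentally equal the pad token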
UpperCamelCase = self.get_config()
UpperCamelCase = prepare_mam_aaa_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _SCREAMING_SNAKE_CASE ( self : int )-> Any:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple )-> List[str]:
"""simple docstring"""
UpperCamelCase = MaMaaaModel(config=UpperCAmelCase_ ).get_decoder().to(UpperCAmelCase_ ).eval()
UpperCamelCase = inputs_dict["input_ids"]
UpperCamelCase = inputs_dict["attention_mask"]
UpperCamelCase = inputs_dict["head_mask"]
# first forward pass
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase = outputs.to_tuple()
        # create several hypothetical next tokens and extend them onto next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , 2 )
        # append the new tokens to input_ids and extend the attention mask to match
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )["last_hidden_state"]
UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[
"last_hidden_state"
]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-2 ) )
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] )-> str:
"""simple docstring"""
UpperCamelCase = MaMaaaModel(config=UpperCAmelCase_ ).to(UpperCAmelCase_ ).eval()
UpperCamelCase = model(**UpperCAmelCase_ )
UpperCamelCase = outputs.encoder_last_hidden_state
UpperCamelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase_ )
UpperCamelCase = MaMaaaEncoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
UpperCamelCase = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase_ )
UpperCamelCase = MaMaaaDecoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
UpperCamelCase = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase_ : List[str] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Optional[Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCamelCase_ : int = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : str = True
UpperCamelCase_ : Tuple = True
UpperCamelCase_ : str = False
UpperCamelCase_ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] )-> Any:
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _SCREAMING_SNAKE_CASE ( self : str )-> List[str]:
"""simple docstring"""
UpperCamelCase = MaMaaaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase = model_class.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ )
self.assertEqual(info["missing_keys"] , [] )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
UpperCamelCase = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not self.is_encoder_decoder:
UpperCamelCase = inputs["input_ids"]
del inputs["input_ids"]
else:
UpperCamelCase = inputs["input_ids"]
UpperCamelCase = inputs.get("decoder_input_ids" , UpperCAmelCase_ )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , UpperCAmelCase_ )
UpperCamelCase = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase = wte(UpperCAmelCase_ )
else:
UpperCamelCase = wte(UpperCAmelCase_ )
UpperCamelCase = wte(UpperCAmelCase_ )
with torch.no_grad():
model(**UpperCAmelCase_ )[0]
def _SCREAMING_SNAKE_CASE ( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs()
UpperCamelCase = input_dict["input_ids"]
UpperCamelCase = input_ids.ne(1 ).to(UpperCAmelCase_ )
UpperCamelCase = MaMaaaForConditionalGeneration(UpperCAmelCase_ ).eval().to(UpperCAmelCase_ )
if torch_device == "cuda":
model.half()
model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
model.generate(num_beams=4 , do_sample=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , num_return_sequences=3 )
def lowerCamelCase__ ( UpperCAmelCase_ )-> Optional[Any]:
"""simple docstring"""
return torch.tensor(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __a ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int )-> Optional[Any]:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _SCREAMING_SNAKE_CASE ( self : int )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase_ )
UpperCamelCase = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
UpperCamelCase = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
with torch.no_grad():
UpperCamelCase = model(**UpperCAmelCase_ )[0]
UpperCamelCase = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , UpperCAmelCase_ )
        # reference slice of the expected hidden states for this checkpoint
UpperCamelCase = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=UpperCAmelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> List[Any]:
"""simple docstring"""
UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase_ )
        # reference inputs for this integration test
UpperCamelCase = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
UpperCamelCase = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
with torch.no_grad():
UpperCamelCase = model(**UpperCAmelCase_ )[0]
UpperCamelCase = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase_ )
        # reference slice of the expected logits for this checkpoint
UpperCamelCase = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=UpperCAmelCase_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Dict:
"""simple docstring"""
UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(UpperCAmelCase_ )
UpperCamelCase = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
UpperCamelCase = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
        # The batch below also tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="pt" )
UpperCamelCase = model.generate(
input_ids=dct["input_ids"].to(UpperCAmelCase_ ) , attention_mask=dct["attention_mask"].to(UpperCAmelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
UpperCamelCase = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
UpperCamelCase = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
assert generated == expected_en
| 718 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : Optional[int] = '''upernet'''
def __init__( self : int , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=0.4 , UpperCAmelCase_ : Union[str, Any]=384 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : str , )-> Tuple:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = backbone_config.get("model_type" )
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(UpperCAmelCase_ )
UpperCamelCase = backbone_config
UpperCamelCase = hidden_size
UpperCamelCase = initializer_range
UpperCamelCase = pool_scales
UpperCamelCase = use_auxiliary_head
UpperCamelCase = auxiliary_loss_weight
UpperCamelCase = auxiliary_in_channels
UpperCamelCase = auxiliary_channels
UpperCamelCase = auxiliary_num_convs
UpperCamelCase = auxiliary_concat_input
UpperCamelCase = loss_ignore_index
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.backbone_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
| 556 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
a = 1.5
a = int(factor * num_class_images )
a = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCamelCase , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=__lowerCamelCase )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
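        # grow the request geometrically (~1.5x per retry, capped at 1e4 images) until
        # the kNN service returns at least factor * num_class_images candidates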
a = client.query(text=__lowerCamelCase )
if len(__lowerCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
a = int(factor * num_images )
a = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCamelCase , aesthetic_weight=0.1 , )
a = 0
a = 0
a = tqdm(desc="""downloading real regularization images""" , total=__lowerCamelCase )
with open(f'{class_data_dir}/caption.txt' , """w""" ) as fa, open(f'{class_data_dir}/urls.txt' , """w""" ) as fa, open(
f'{class_data_dir}/images.txt' , """w""" ) as fa:
while total < num_class_images:
a = class_images[count]
count += 1
try:
a = requests.get(images["""url"""] )
if img.status_code == 200:
a = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __A ( ) -> Any:
a = argparse.ArgumentParser("""""" , add_help=__lowerCamelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__lowerCamelCase , type=__lowerCamelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__lowerCamelCase , type=__lowerCamelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__lowerCamelCase )
return parser.parse_args()
if __name__ == "__main__":
__UpperCamelCase : Any = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 468 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
super().setUp()
a = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self :List[Any] , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :int ):
'''simple docstring'''
a = """<unk> UNwanted , running"""
a = """<unk> unwanted, running"""
return input_text, output_text
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__magic_name__ )
a = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(__magic_name__ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [0, 4, 8, 7] )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = TransfoXLTokenizer(lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TransfoXLTokenizer(lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = TransfoXLTokenizer(lower_case=__magic_name__ )
a = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
a = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(__magic_name__ ) , __magic_name__ )
self.assertEqual(tokenizer.convert_tokens_to_string(__magic_name__ ) , __magic_name__ )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = self.get_tokenizer()
a = len(__magic_name__ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__magic_name__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 468 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : str = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ['''ChineseCLIPFeatureExtractor''']
lowercase : Union[str, Any] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 114 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Any = logging.get_logger(__name__)
lowercase : str = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Tuple = 'donut-swin'
A : Union[str, Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=96 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
super().__init__(**_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = image_size
snake_case_ : Any = patch_size
snake_case_ : str = num_channels
snake_case_ : Dict = embed_dim
snake_case_ : Tuple = depths
snake_case_ : List[Any] = len(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[int] = num_heads
snake_case_ : Optional[int] = window_size
snake_case_ : Any = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Union[str, Any] = attention_probs_dropout_prob
snake_case_ : str = drop_path_rate
snake_case_ : List[str] = hidden_act
snake_case_ : Optional[int] = use_absolute_embeddings
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Any = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) )
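        # with the defaults (embed_dim=96 and four stages) this is 96 * 2**3 = 768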
| 114 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
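# np.maximum broadcasts elementwise, so the same function handles scalars and
# arrays alike: relu(np.array([-2.0, 0.0, 3.5])) -> array([0. , 0. , 3.5])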
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309 | '''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
snake_case = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
snake_case = concatenate_datasets
snake_case = DownloadConfig
snake_case = DownloadManager
snake_case = DownloadMode
snake_case = DownloadConfig
snake_case = DownloadMode
snake_case = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 309 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''pixel_values''']
def __init__( self, A = True, A = None, A = PILImageResampling.BICUBIC, A = True, A = True, A = 1 / 255, A = None, A = True, A = None, A = None, **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : str = get_size_dict(A, default_to_square=A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BILINEAR, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(A )
if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured resize/crop/rescale/normalize pipeline to one image or a batch."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
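# A minimal end-to-end sketch of the pipeline above (the processor class name and its
# constructor defaults are stand-ins for whichever image processor these methods belong to):
#
#     processor = SomeImageProcessor(do_resize=True, size={"shortest_edge": 224})
#     batch = processor.preprocess(image, return_tensors="pt")
#     batch["pixel_values"]  # channels-first tensor of shape (num_images, 3, H, W)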
| 508 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track peak CPU (RSS) memory usage on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """Snapshot time, CPU RSS, and per-GPU allocated memory before a workload."""
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    """Return elapsed time and memory deltas (in MiB) relative to `start_measures`."""
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    """Pretty-print the measurements collected by `end_measure`."""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 76 |
"""Project Euler problem 6: sum square difference."""
def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
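# Closed forms used above, valid for any positive integer n:
#   1 + 2 + ... + n       = n(n + 1)/2
#   1^2 + 2^2 + ... + n^2 = n(n + 1)(2n + 1)/6
# e.g. n = 10: square_of_sum = 55**2 = 3025, sum_of_squares = 385, difference = 2640.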
if __name__ == "__main__":
print(f"""{solution() = }""")
| 620 | 0 |
"""simple docstring"""
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 703 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
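# A small sketch of how the helper is typically consumed downstream (the schedule
# length is illustrative):
#
#     betas = betas_for_alpha_bar(1000)                  # cosine alpha-bar, max_beta=0.999
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)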
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverted DDIM scheduler: runs the deterministic DDIM update in the noising direction."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" (next-noisier) timestep value
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
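# A minimal inversion-loop sketch (`unet` stands in for a real epsilon-predicting model,
# and 50 steps is an arbitrary choice):
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample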
| 439 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput

from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space so it is only used at the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
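# A minimal round-trip sketch (the checkpoint name is one of the entries in the
# pretrained map above; network access is assumed):
#
#     tokenizer = T5Tokenizer.from_pretrained("t5-small")
#     ids = tokenizer("translate English to German: hello", return_tensors="pt").input_ids
#     text = tokenizer.decode(ids[0], skip_special_tokens=True)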
| 184 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
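# Note on the three tasks above: NER and Chunk read CoNLL-style column files and differ
# only in which column carries the label (`label_idx=-1` vs `-2`), while POS consumes
# CoNLL-U files via `parse_incr` and uses the `upos` field as the label.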
| 184 | 1 |
"""Abbreviation matching (dynamic programming): can string `a` be turned into string `b`
by upper-casing some of its lowercase letters and deleting the rest?"""


def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # upper-case a[i] so it matches b[j]
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # delete the lowercase a[i]
                    dp[i + 1][j] = True
    return dp[n][m]
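# Worked example (strings are illustrative): abbr("daBcd", "ABC") is True because "a" and
# "c" can be upper-cased, "B" matches directly, and the remaining lowercase "d"s are
# deleted; abbr("dBcd", "ABC") is False since no "a" is available to produce "A".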
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
"""Nezha model configuration."""

from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
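# A minimal usage sketch (the overrides are arbitrary):
#
#     config = NezhaConfig(hidden_size=512, num_hidden_layers=8)
#     assert config.model_type == "nezha"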
| 27 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 573 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
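# Example invocation (the script filename and output path are illustrative):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny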
| 574 | 0 |
"""Tests for optimizers and learning-rate schedules."""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wrap a schedule function so it becomes a picklable callable object."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
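# Rationale for the wrapper above: `LambdaLR.state_dict()` only serializes lr lambdas
# that are callable *objects* (plain functions and lambdas are skipped), so wrapping each
# schedule function in this module-level class lets the save/reload test exercise them.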
| 714 |
"""MobileNetV1 model configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
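# A minimal ONNX-config sketch (the depth multiplier is arbitrary):
#
#     config = MobileNetV1Config(depth_multiplier=0.75)
#     onnx_config = MobileNetV1OnnxConfig(config)
#     onnx_config.inputs   # OrderedDict([("pixel_values", {0: "batch"})])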
| 187 | 0 |
"""Convert a TensorFlow 2.x BERT checkpoint to a PyTorch model."""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
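# Example invocation (paths are illustrative):
#
#     python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./tf2_model_dir/ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin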
| 565 |
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of `number`.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
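# An equivalent loop (Kernighan's trick), shown for comparison; it iterates once per set
# bit instead of scanning the whole binary string:
#
#     def get_set_bits_count_kernighan(number: int) -> int:
#         count = 0
#         while number:
#             number &= number - 1  # clear the lowest set bit
#             count += 1
#         return count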
if __name__ == "__main__":
import doctest
doctest.testmod()
| 611 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 641 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : str = 'true'
def snake_case_ ( __lowercase , __lowercase=8_2 , __lowercase=1_6 ):
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = RegressionModel()
UpperCAmelCase_ : Optional[int] = deepcopy(__lowercase )
UpperCAmelCase_ : Union[str, Any] = RegressionDataset(length=__lowercase )
UpperCAmelCase_ : Any = DataLoader(__lowercase , batch_size=__lowercase )
model.to(accelerator.device )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.prepare(__lowercase , __lowercase )
return model, ddp_model, dataloader
def snake_case_ ( __lowercase , __lowercase=False ):
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
UpperCAmelCase_ : List[Any] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__lowercase ):
UpperCAmelCase_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowercase , max_length=__lowercase )
return outputs
with accelerator.main_process_first():
UpperCAmelCase_ : List[str] = dataset.map(
__lowercase , batched=__lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
UpperCAmelCase_ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowercase ):
if use_longest:
return tokenizer.pad(__lowercase , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(__lowercase , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1_6 )
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[int] = Accelerator(dispatch_batches=__lowercase , split_batches=__lowercase )
UpperCAmelCase_ : int = get_dataloader(__lowercase , not dispatch_batches )
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(__lowercase , __lowercase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        # Gather across processes, trimming any duplicated samples from the last batch.
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
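# Why `gather_for_metrics` rather than plain `gather`: with uneven shards the
# sampler repeats trailing samples so every process sees full batches; plain
# gather would return those duplicates, inflating metric counts. Illustrative
# contrast (hypothetical tensors):
#
#     padded = accelerator.gather(preds)             # may contain repeats
#     exact = accelerator.gather_for_metrics(preds)  # trimmed to dataset length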
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
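# `math.isclose` uses a relative tolerance of 1e-09 by default; if lower-precision
# evaluation ever introduces drift, an explicit tolerance could be passed
# (illustrative):
#
#     math.isclose(baseline[key], distributed[key], rel_tol=1e-6)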
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main() | 641 | 1 |
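# To exercise the distributed code paths, a script like this is typically run
# through the Accelerate CLI (illustrative invocation; the filename and process
# count are assumptions, adjust to your setup):
#
#     accelerate launch --num_processes 2 test_metrics.py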