code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__lowercase : List[str] = datasets.logging.get_logger(__name__)
__lowercase : List[str] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
__lowercase : Dict = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
__lowercase : Optional[int] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
__lowercase : str = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    # BLEURT metric wrapper around the google-research `bleurt` scorer.
    # NOTE(review): the decorator reads _DESCRIPTION/_KWARGS_DESCRIPTION and the
    # methods read _CITATION, CHECKPOINT_URLS, checkpoint_name and scores, but the
    # module-level constants above are bound to obfuscated names (__lowercase, ...),
    # so these lookups raise NameError at runtime — confirm against the upstream
    # datasets BLEURT metric script.

    def __UpperCAmelCase ( self ):
        '''simple docstring'''
        # Declare metric metadata: both inputs are flat sequences of strings.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )

    def __UpperCAmelCase ( self , __a ):
        '''simple docstring'''
        # Resolve the checkpoint name from self.config_name (case-insensitively),
        # download/extract it via the download manager, and build the scorer.
        # NOTE(review): __a is presumably the dl_manager; results are assigned to
        # throwaway locals instead of (upstream) checkpoint_name / model_path /
        # self.scorer — confirm against the upstream metric script.
        if self.config_name == "default":
            logger.warning(
                'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
                'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
            __a : int = 'bleurt-base-128'

        if self.config_name.lower() in CHECKPOINT_URLS:
            __a : str = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            __a : List[str] = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )

        # download the model checkpoint specified by self.config_name and set up the scorer
        __a : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        __a : Optional[int] = score.BleurtScorer(os.path.join(__a , __a ) )

    def __UpperCAmelCase ( self , __a , __a ):
        '''simple docstring'''
        # Score candidates against references; returns {"scores": [...]}.
        # NOTE(review): duplicate parameter names are a SyntaxError and `scores`
        # is unbound — upstream this is (self, predictions, references).
        __a : Union[str, Any] = self.scorer.score(references=__a , candidates=__a )
        return {"scores": scores}
| 27 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def lowerCAmelCase__ ( ):
    """Download the standard COCO cats test image and return it as a PIL image.

    Used only to produce a deterministic input for the conversion sanity check.
    """
    # Canonical test image used across transformers conversion scripts.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Fix: the obfuscated original passed an undefined name to requests.get;
    # stream the response so PIL can read directly from the raw socket.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def lowerCAmelCase__ ( swiftformer_name ):
    """Return the first five expected logits for a given SwiftFormer variant.

    Args:
        swiftformer_name: one of ``"swiftformer_xs"``, ``"swiftformer_s"``,
            ``"swiftformer_l1"``, ``"swiftformer_l3"``.

    Returns:
        A ``torch.Tensor`` of shape ``(5,)`` with the reference logit values.

    Raises:
        ValueError: if ``swiftformer_name`` is not a known variant.
    """
    # Fix: the obfuscated original's parameter name never matched the name the
    # body compared against (NameError), and unknown variants silently
    # returned None instead of failing loudly.
    expected = {
        "swiftformer_xs": [-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01],
        "swiftformer_s": [3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01],
        "swiftformer_l1": [-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02],
        "swiftformer_l3": [-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02],
    }
    if swiftformer_name not in expected:
        raise ValueError(f"unknown SwiftFormer variant: {swiftformer_name}")
    return torch.tensor(expected[swiftformer_name])
def lowerCAmelCase__ ( dct , old_key , new_key ):
    """Move the value stored under ``old_key`` in ``dct`` to ``new_key`` (in place).

    Args:
        dct: mutable mapping (a model state dict); mutated in place.
        old_key: key to remove.
        new_key: key under which the popped value is re-inserted.
    """
    # Fix: the obfuscated original popped with an undefined name and then
    # discarded the value instead of re-inserting it under the new key.
    val = dct.pop(old_key)
    dct[new_key] = val
def lowerCAmelCase__ ( state_dict ):
    """Build (old_name, new_name) pairs mapping original SwiftFormer state-dict
    keys to their HuggingFace equivalents.

    Args:
        state_dict: the original checkpoint's state dict (only keys are read).

    Returns:
        list of ``(old_key, new_key)`` tuples, one per input key.
    """
    # Fix: the obfuscated original bound every intermediate to a throwaway
    # local, leaving k_new / ls / rename_keys undefined (NameError).
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        # Normalise layer-name fragments to the HF naming scheme.
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # network.<stage>.<block>.rest -> encoder.network.<stage>.blocks.<block>.rest
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
    '''simple docstring'''
    # Convert an original SwiftFormer checkpoint into a HuggingFace
    # SwiftFormerForImageClassification model, sanity-check its logits on the
    # COCO test image, and save the result.
    # NOTE(review): the body reads swiftformer_name, original_ckpt, checkpoint,
    # idalabel, rename_keys, hf_model, processor, inputs, hf_logits and
    # pytorch_dump_folder_path, but every assignment targets the throwaway
    # local `_a` — this function raises NameError as written; restore the
    # bindings from the upstream conversion script before use.
    _a : Tuple = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    _a : Optional[int] = 1_0_0_0
    _a : Optional[Any] = """huggingface/label-files"""
    _a : Optional[Any] = """imagenet-1k-id2label.json"""
    _a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
    _a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
    _a : Dict = idalabel
    _a : Optional[int] = {v: k for k, v in idalabel.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        _a : Any = [3, 3, 6, 4]
        _a : int = [4_8, 5_6, 1_1_2, 2_2_0]
    elif swiftformer_name == "swiftformer_s":
        _a : Any = [3, 3, 9, 6]
        _a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4]
    elif swiftformer_name == "swiftformer_l1":
        _a : List[Any] = [4, 3, 1_0, 5]
        _a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4]
    elif swiftformer_name == "swiftformer_l3":
        _a : List[Any] = [4, 4, 1_2, 6]
        _a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("""https""" ):
            _a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ )
        else:
            _a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" )
    _a : int = checkpoint

    _a : Optional[Any] = create_rename_keys(UpperCamelCase__ )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    # load HuggingFace model
    _a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval()
    hf_model.load_state_dict(UpperCamelCase__ )

    # prepare test inputs
    _a : Any = prepare_img()
    _a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
    _a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" )

    # compare outputs from both models
    _a : Dict = get_expected_output(UpperCamelCase__ )
    _a : int = hf_model(inputs["""pixel_values"""] ).logits
    assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
    assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 )

    Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the SwiftFormer conversion.
    # Fix: the obfuscated script assigned the parser/args to a throwaway name
    # and then referenced the undefined `parser`, `args`, and
    # `convert_swiftformer_checkpoint`; the converter in this file is bound to
    # `lowerCAmelCase__` (the last definition above).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swiftformer_name',
        default='swiftformer_xs',
        choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
        type=str,
        help='Name of the SwiftFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='./converted_outputs/',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')

    args = parser.parse_args()
    lowerCAmelCase__(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 294 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
    """simple docstring"""
    # Tokenizer test-suite for MgpstrTokenizer.
    # NOTE(review): the base `A` is unresolved obfuscation (upstream:
    # TokenizerTesterMixin), and several bodies read names (vocab_tokens,
    # tokenizers, special_token, decoded, input_text, output_text, tokens,
    # ids, text_a, ...) that were collapsed into throwaway locals — the tests
    # raise NameError as written; restore from the upstream test module.

    # Class-level configuration consumed by the (missing) tester mixin.
    __a = MgpstrTokenizer
    __a = False
    __a = {}
    __a = False

    def lowerCamelCase__ ( self : List[str] ):
        '''simple docstring'''
        # Build a minimal character-level vocabulary and write it into the
        # tester's temp dir so tokenizers can be re-loaded from disk.
        super().setUp()

        # fmt: off
        __UpperCAmelCase : List[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        __UpperCAmelCase : Optional[Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )

        __UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) + """\n""" )

    def lowerCamelCase__ ( self : Dict , **UpperCamelCase : Any ):
        '''simple docstring'''
        # Reload a tokenizer from the temp dir written in setUp.
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )

    def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ):
        '''simple docstring'''
        # Supply an (input, expected output) text pair for round-trip tests.
        __UpperCAmelCase : Any = """tester"""
        __UpperCAmelCase : Dict = """tester"""
        return input_text, output_text

    @unittest.skip("""MGP-STR always lower cases letters.""" )
    def lowerCamelCase__ ( self : int ):
        '''simple docstring'''
        pass

    def lowerCamelCase__ ( self : Tuple ):
        '''simple docstring'''
        # Adding a special token must encode to exactly one id and never
        # appear in output decoded with skip_special_tokens=True.
        __UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=UpperCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase : Any = """[SPECIAL_TOKEN]"""

                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                __UpperCAmelCase : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase )
                self.assertEqual(len(UpperCamelCase ) , 1 )

                __UpperCAmelCase : List[str] = tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
                self.assertTrue(special_token not in decoded )

    def lowerCamelCase__ ( self : List[Any] ):
        '''simple docstring'''
        # tokenize / convert_tokens_to_ids / encode / decode must round-trip.
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase : Optional[Any] = self.get_input_output_texts(UpperCamelCase )

                __UpperCAmelCase : str = tokenizer.tokenize(UpperCamelCase )
                __UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase )
                __UpperCAmelCase : Dict = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
                self.assertListEqual(UpperCamelCase , UpperCamelCase )

                __UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase )
                self.assertNotEqual(len(UpperCamelCase ) , 0 )
                __UpperCAmelCase : str = tokenizer.decode(UpperCamelCase )
                self.assertIsInstance(UpperCamelCase , UpperCamelCase )

                self.assertEqual(text_a.replace(""" """ , """""" ) , UpperCamelCase )

    @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def lowerCamelCase__ ( self : Optional[int] ):
        '''simple docstring'''
        pass

    @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def lowerCamelCase__ ( self : List[Any] ):
        '''simple docstring'''
        pass
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 320 | 0 |
'''simple docstring'''
from __future__ import annotations
def a__ ( lowercase : list[int] ) -> int:
    """Return the maximum sum of non-adjacent elements of ``lowercase``.

    Classic "house robber" dynamic programme: for each element track the best
    sum that includes it and the best sum that excludes it.  An empty list
    (and an all-negative list) yields 0, since taking nothing is allowed.
    """
    if not lowercase:
        return 0
    # Best sums ending with / without the most recently seen element.
    max_including = lowercase[0]
    max_excluding = 0
    for num in lowercase[1:]:
        # Fix: the obfuscated version assigned both halves of this tuple to a
        # single throwaway name and compared the input list with itself.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 324 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : str = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def a__ ( lowercase : str ) -> str:
    """Map a ParlAI Blenderbot state-dict key to its HuggingFace equivalent.

    Args:
        lowercase: a key from the ParlAI checkpoint's state dict.

    Returns:
        The corresponding HuggingFace state-dict key.
    """
    # Substring rewrites applied in order (ParlAI name -> HF name).  Inlined
    # here because the module-level PATTERNS constant was lost to name
    # obfuscation (the list above is bound to an unrelated name).
    patterns = [
        ["attention", "attn"],
        ["encoder_attention", "encoder_attn"],
        ["q_lin", "q_proj"],
        ["k_lin", "k_proj"],
        ["v_lin", "v_proj"],
        ["out_lin", "out_proj"],
        ["norm_embeddings", "layernorm_embedding"],
        ["position_embeddings", "embed_positions"],
        ["embeddings", "embed_tokens"],
        ["ffn.lin", "fc"],
    ]
    # Fix: the obfuscated original read the undefined name `k` (the parameter
    # was renamed) and replaced with identical arguments.
    k = lowercase
    if k == "embeddings.weight":
        # The shared token embedding has a dedicated name in HF.
        return "shared.weight"
    for parlai_name, hf_name in patterns:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def a__ ( lowercase : dict ) -> None:
    """Rename Blenderbot-3B ``layernorm_embedding`` keys to ``layer_norm`` in place.

    Args:
        lowercase: the model state dict; mutated in place.

    Raises:
        AssertionError: if a renamed target key already exists in the dict.
    """
    sd = lowercase
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        # Fix: the obfuscated original popped with an undefined name and never
        # stored the value back under the renamed key.
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
lowercase__ : str = ['START']
@torch.no_grad()
def a__ ( lowercase : Optional[int], lowercase : List[str], lowercase : List[str] ) -> Dict:
    """simple docstring"""
    # Load a ParlAI Blenderbot checkpoint, rename its state-dict keys to the
    # HuggingFace schema, load them into BlenderbotForConditionalGeneration,
    # and save the half-precision model.
    # NOTE(review): the three parameters share one obfuscated name (a
    # SyntaxError) and the body reads model, cfg, m, sd, valid_keys, failures,
    # mapping and new_k, none of which are bound — upstream the signature is
    # (checkpoint_path, pytorch_dump_folder_path, config_json_path); restore
    # from the upstream script before use.
    _UpperCamelCase = torch.load(lowercase, map_location='''cpu''' )
    _UpperCamelCase = model['''model''']
    _UpperCamelCase = BlenderbotConfig.from_json_file(lowercase )
    _UpperCamelCase = BlenderbotForConditionalGeneration(lowercase )
    _UpperCamelCase = m.model.state_dict().keys()
    _UpperCamelCase = []
    _UpperCamelCase = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        _UpperCamelCase = rename_state_dict_key(lowercase )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            _UpperCamelCase = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(lowercase )
    m.model.load_state_dict(lowercase, strict=lowercase )
    m.half()
    m.save_pretrained(lowercase )
if __name__ == "__main__":
    # CLI entry point for the ParlAI -> HuggingFace Blenderbot conversion.
    # Fix: the obfuscated script bound the parser/args to a throwaway name and
    # then referenced the undefined `parser`, `args`, and
    # `convert_parlai_checkpoint`; the converter in this file is bound to
    # `a__` (the last definition above).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    a__(args.src_path, args.save_dir, args.hf_config_json)
| 324 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCAmelCase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : int ,__lowercase : Tuple ,__lowercase : List[str] ,__lowercase : Union[str, Any] ,__lowercase : Dict ):
    '''simple docstring'''
    # Walk `key` (a dot-separated attribute path) into the HF model and copy
    # `value` into the matching parameter slot, checking shapes first.
    # NOTE(review): all six parameters share the obfuscated name __lowercase
    # (a SyntaxError) and the body reads is_finetuned, key, hf_pointer,
    # weight_type, value, hf_shape and full_name, none of which are bound —
    # upstream the signature is (hf_pointer, key, value, full_name,
    # weight_type, is_finetuned); restore before use.  The nesting below is
    # reconstructed conventionally (the dump stripped all indentation).
    for attribute in key.split('.' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                A_ : List[str] = 'lm_head'

        A_ : List[str] = getattr(__lowercase ,__lowercase )

    if weight_type is not None:
        A_ : Union[str, Any] = getattr(__lowercase ,__lowercase ).shape
    else:
        A_ : List[str] = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        A_ : Any = value
    elif weight_type == "weight_g":
        A_ : Union[str, Any] = value
    elif weight_type == "weight_v":
        A_ : Optional[int] = value
    elif weight_type == "bias":
        A_ : Any = value
    else:
        A_ : Tuple = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def UpperCamelCase ( __lowercase : Dict ,__lowercase : Union[str, Any] ,__lowercase : Any ):
    '''simple docstring'''
    # Iterate the fairseq state dict and route each tensor either to the conv
    # feature-extractor loader or, via the MAPPING table, to set_recursively.
    # NOTE(review): obfuscation collapsed the parameter names (upstream:
    # fairseq_model, hf_model, is_finetuned) and the local bindings
    # (unused_weights, fairseq_dict, feature_extractor, is_used, mapped_key,
    # layer_index, weight_type) — this raises NameError as written.  Nesting
    # reconstructed conventionally (the dump stripped all indentation).
    A_ : List[str] = []
    A_ : str = fairseq_model.state_dict()

    A_ : int = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        A_ : Dict = False
        if "conv_layers" in name:
            load_conv_layer(
                __lowercase ,__lowercase ,__lowercase ,__lowercase ,hf_model.config.feat_extract_norm == 'group' ,)
            A_ : Union[str, Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                A_ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    A_ : Optional[int] = True
                    if "*" in mapped_key:
                        A_ : Optional[Any] = name.split(__lowercase )[0].split('.' )[-2]
                        A_ : Dict = mapped_key.replace('*' ,__lowercase )
                    if "weight_g" in name:
                        A_ : Dict = 'weight_g'
                    elif "weight_v" in name:
                        A_ : str = 'weight_v'
                    elif "bias" in name:
                        A_ : List[Any] = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        A_ : Dict = 'weight'
                    else:
                        A_ : List[Any] = None
                    set_recursively(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
                continue
        if not is_used:
            unused_weights.append(__lowercase )

    logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( __lowercase : Any ,__lowercase : List[str] ,__lowercase : Dict ,__lowercase : int ,__lowercase : Optional[Any] ):
    '''simple docstring'''
    # Copy a single conv-feature-extractor tensor (conv weight/bias or layer
    # norm weight/bias) from the fairseq checkpoint into the HF feature
    # extractor, asserting that the shapes agree.
    # NOTE(review): the five parameters share one obfuscated name (a
    # SyntaxError) and the body reads full_name, name, items, type_id,
    # layer_id, value, feature_extractor, use_group_norm and unused_weights,
    # none of which are bound — upstream the signature is (full_name, value,
    # feature_extractor, unused_weights, use_group_norm); restore before use.
    A_ : Any = full_name.split('conv_layers.' )[-1]
    A_ : List[Any] = name.split('.' )
    A_ : Optional[int] = int(items[0] )
    A_ : Dict = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            A_ : str = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            A_ : Dict = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            A_ : List[Any] = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            A_ : Tuple = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(__lowercase )
@torch.no_grad()
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Union[str, Any] ,__lowercase : Union[str, Any]=None ,__lowercase : Optional[Any]=None ,__lowercase : Optional[int]=True ):
    '''simple docstring'''
    # End-to-end UniSpeech conversion: build the config, (optionally) a CTC
    # tokenizer + feature extractor from a fairseq dictionary, load the
    # fairseq checkpoint, copy weights, and save the HF model.
    # NOTE(review): duplicate obfuscated parameter names (a SyntaxError); the
    # body reads config_path, dict_path, is_finetuned, target_dict, config,
    # checkpoint_path, pytorch_dump_folder_path, processor, model and
    # hf_unispeech — upstream the signature is (checkpoint_path,
    # pytorch_dump_folder_path, config_path, dict_path, is_finetuned);
    # restore before use.  Nesting reconstructed conventionally.
    if config_path is not None:
        A_ : List[str] = UniSpeechConfig.from_pretrained(__lowercase )
    else:
        A_ : Tuple = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            A_ : int = Dictionary.load_from_json(__lowercase )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            A_ : List[Any] = target_dict.pad_index
            A_ : Tuple = target_dict.bos_index
            A_ : Tuple = target_dict.eos_index
            A_ : List[Any] = len(target_dict.symbols )
            A_ : Any = os.path.join(__lowercase ,'vocab.json' )
            if not os.path.isdir(__lowercase ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowercase ) )
                return
            os.makedirs(__lowercase ,exist_ok=__lowercase )
            A_ : List[str] = target_dict.indices

            # fairseq has the <pad> and <s> switched
            A_ : List[Any] = 42
            A_ : Tuple = 43
            with open(__lowercase ,'w' ,encoding='utf-8' ) as vocab_handle:
                json.dump(__lowercase ,__lowercase )
            A_ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
                __lowercase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='|' ,do_lower_case=__lowercase ,)
            A_ : Union[str, Any] = True if config.feat_extract_norm == 'layer' else False
            A_ : Optional[int] = WavaVecaFeatureExtractor(
                feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=__lowercase ,return_attention_mask=__lowercase ,)
            A_ : List[str] = WavaVecaProcessor(feature_extractor=__lowercase ,tokenizer=__lowercase )
            processor.save_pretrained(__lowercase )

        A_ : Tuple = UniSpeechForCTC(__lowercase )
    else:
        A_ : Tuple = UniSpeechForPreTraining(__lowercase )

    if is_finetuned:
        A_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] ,arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
    else:
        A_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    A_ : Optional[Any] = model[0].eval()

    recursively_load_weights(__lowercase ,__lowercase ,__lowercase )

    hf_unispeech.save_pretrained(__lowercase )
if __name__ == "__main__":
    # CLI entry point for the fairseq -> HuggingFace UniSpeech conversion.
    # Fix: the obfuscated script bound the parser/args to a throwaway name and
    # then referenced the undefined `parser`, `args`, and
    # `convert_unispeech_checkpoint`; the converter in this file is bound to
    # `UpperCamelCase` (the last definition above).
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    UpperCamelCase(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 366 | import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
    '''simple docstring'''
    # Deprecated alias kept for backward compatibility: behaves exactly like
    # its parent image processor but warns on construction.
    # NOTE(review): the base `__A` is unresolved obfuscation — upstream this
    # class extends DonutImageProcessor (imported above).

    def __init__( self , *lowercase , **lowercase ):
        """simple docstring"""
        # Emit the deprecation warning, then defer entirely to the parent.
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , lowercase , )
        super().__init__(*lowercase , **lowercase )
| 192 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ):
__UpperCamelCase =1
__UpperCamelCase =0
__UpperCamelCase =1
__UpperCamelCase =1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
    """Test-suite for the GPT-2 tokenizers (slow ``GPTaTokenizer`` and fast
    ``GPTaTokenizerFast``) driven by the shared ``TokenizerTesterMixin``.

    NOTE(review): this file appears machine-mangled — e.g. one signature below
    reuses the same parameter name (``*A_ , **A_``), which is invalid Python,
    and many assignments bind the throwaway name ``__UpperCamelCase`` instead
    of the variable that following lines read. The original identifiers need
    restoring before this suite can actually run.
    """

    # Classes under test plus kwargs applied to every tokenizer instantiation.
    UpperCAmelCase__ : Dict = GPTaTokenizer
    UpperCAmelCase__ : Any = GPTaTokenizerFast
    UpperCAmelCase__ : Tuple = True
    UpperCAmelCase__ : int = {"add_prefix_space": True}
    UpperCAmelCase__ : Any = False
    # Writes a tiny BPE vocab/merges fixture into self.tmpdirname.
    def _a ( self ) -> Optional[int]:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase =[
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        __UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) )
        __UpperCamelCase =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __UpperCamelCase ={'unk_token': '<unk>'}
        __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(A_ ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(A_ ) )
    # Factory for a slow tokenizer backed by the fixture files.
    def _a ( self , **A_ ) -> str:
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **A_ )
    # Factory for the fast (Rust) tokenizer backed by the fixture files.
    def _a ( self , **A_ ) -> Optional[Any]:
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
    # Provides (input, expected output) text for round-trip tests.
    def _a ( self , A_ ) -> Tuple:
        __UpperCamelCase ='lower newer'
        __UpperCamelCase ='lower newer'
        return input_text, output_text
    # Checks tokenization and token->id conversion of the slow tokenizer.
    def _a ( self ) -> List[Any]:
        __UpperCamelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase ='lower newer'
        __UpperCamelCase =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
        self.assertListEqual(A_ , A_ )
        __UpperCamelCase =tokens + [tokenizer.unk_token]
        __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
    # Verifies slow and fast tokenizers agree on tokens, ids and unk handling.
    def _a ( self ) -> int:
        if not self.test_rust_tokenizer:
            return
        __UpperCamelCase =self.get_tokenizer()
        __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
        __UpperCamelCase ='lower newer'
        # Testing tokenization
        __UpperCamelCase =tokenizer.tokenize(A_ , add_prefix_space=A_ )
        __UpperCamelCase =rust_tokenizer.tokenize(A_ )
        self.assertListEqual(A_ , A_ )
        # Testing conversion to ids without special tokens
        __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
        __UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
        self.assertListEqual(A_ , A_ )
        # Testing conversion to ids with special tokens
        __UpperCamelCase =self.get_rust_tokenizer(add_prefix_space=A_ )
        __UpperCamelCase =tokenizer.encode(A_ , add_prefix_space=A_ )
        __UpperCamelCase =rust_tokenizer.encode(A_ )
        self.assertListEqual(A_ , A_ )
        # Testing the unknown token
        __UpperCamelCase =tokens + [rust_tokenizer.unk_token]
        __UpperCamelCase =[14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A_ ) , A_ )
    # NOTE(review): ``*A_ , **A_`` duplicates the argument name — invalid Python.
    def _a ( self , *A_ , **A_ ) -> Optional[int]:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    # Padding without a pad token must raise for every encode entry point.
    def _a ( self , A_=15 ) -> List[str]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
                # Simple input
                __UpperCamelCase ='This is a simple input'
                __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
                __UpperCamelCase =('This is a simple input', 'This is a pair')
                __UpperCamelCase =[
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
                # Simple input
                self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
                # Simple input
                self.assertRaises(
                    A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
                # Pair input
                self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding='max_length' )
                # Pair input
                self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding='max_length' )
                # Pair input
                self.assertRaises(
                    A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding='max_length' , )
    # Exercises max_length and longest-sequence padding with an explicit <pad> token.
    def _a ( self ) -> int:
        __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
        # Simple input
        __UpperCamelCase ='This is a simple input'
        __UpperCamelCase =['This is a simple input looooooooong', 'This is a simple input']
        __UpperCamelCase =('This is a simple input', 'This is a pair')
        __UpperCamelCase =[
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        __UpperCamelCase =tokenizer.pad_token_id
        __UpperCamelCase =tokenizer(A_ , padding='max_length' , max_length=30 , return_tensors='np' )
        __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
        __UpperCamelCase =tokenizer(*A_ , padding='max_length' , max_length=60 , return_tensors='np' )
        __UpperCamelCase =tokenizer(A_ , padding=A_ , truncate=A_ , return_tensors='np' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['input_ids'] )
        self.assertTrue(0 in out_s['attention_mask'] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
        self.assertFalse(0 in out_sa['attention_mask'][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
        self.assertTrue(0 in out_sa['attention_mask'][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['input_ids'] )
        self.assertTrue(0 in out_p['attention_mask'] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
        self.assertFalse(0 in out_pa['attention_mask'][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
        self.assertTrue(0 in out_pa['attention_mask'][1] )
    # A custom BOS token must be prepended on encode and survive decode.
    def _a ( self ) -> Union[str, Any]:
        __UpperCamelCase ='$$$'
        __UpperCamelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A_ , add_bos_token=A_ )
        __UpperCamelCase ='This is a simple input'
        __UpperCamelCase =['This is a simple input 1', 'This is a simple input 2']
        __UpperCamelCase =tokenizer.bos_token_id
        __UpperCamelCase =tokenizer(A_ )
        __UpperCamelCase =tokenizer(A_ )
        self.assertEqual(out_s.input_ids[0] , A_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        __UpperCamelCase =tokenizer.decode(out_s.input_ids )
        __UpperCamelCase =tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    # Intentionally skipped in the original suite.
    def _a ( self ) -> Optional[int]:
        pass
    # Special-tokens mask must align with the ids produced by encode_plus.
    def _a ( self ) -> Any:
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        __UpperCamelCase =[self.get_tokenizer(do_lower_case=A_ , add_bos_token=A_ )]
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                __UpperCamelCase ='Encode this.'
                __UpperCamelCase ='This one too please.'
                __UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
                encoded_sequence += tokenizer.encode(A_ , add_special_tokens=A_ )
                __UpperCamelCase =tokenizer.encode_plus(
                    A_ , A_ , add_special_tokens=A_ , return_special_tokens_mask=A_ , )
                __UpperCamelCase =encoded_sequence_dict['input_ids']
                __UpperCamelCase =encoded_sequence_dict['special_tokens_mask']
                self.assertEqual(len(A_ ) , len(A_ ) )
                __UpperCamelCase =[
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(A_ )
                ]
                __UpperCamelCase =[x for x in filtered_sequence if x is not None]
                self.assertEqual(A_ , A_ )
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
    """Regression tests for the facebook/opt-350m tokenizer (slow->fast
    conversion, save/reload round-trip, and a changed BOS token).

    NOTE(review): file appears machine-mangled — assignments bind the dead
    name ``__UpperCamelCase`` while later lines read ``tokenizer`` /
    ``tokens``; restore the original identifiers before running.
    """

    # Slow->fast conversion must produce the reference ids, and survive
    # a save_pretrained / from_pretrained round-trip.
    def _a ( self ) -> Optional[Any]:
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
        __UpperCamelCase ='A photo of a cat'
        __UpperCamelCase =tokenizer.encode(
            A_ , )
        self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained('test_opt' )
        __UpperCamelCase =AutoTokenizer.from_pretrained('./test_opt' )
        __UpperCamelCase =tokenizer.encode(
            A_ , )
        self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
    # The slow tokenizer path must yield the same reference ids.
    def _a ( self ) -> Dict:
        __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=A_ )
        __UpperCamelCase ='A photo of a cat'
        __UpperCamelCase =tokenizer.encode(
            A_ , )
        # Same as above
        self.assertEqual(A_ , [2, 250, 1345, 9, 10, 4758] )
    # Known-broken fast-tokenizer behavior with a remapped BOS token.
    @unittest.skip('This test is failing because of a bug in the fast tokenizer' )
    def _a ( self ) -> List[Any]:
        __UpperCamelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=A_ )
        __UpperCamelCase ='bos'
        __UpperCamelCase =tokenizer.get_vocab()['bos']
        __UpperCamelCase ='A photo of a cat'
        __UpperCamelCase =tokenizer.encode(
            A_ , )
        # We changed the bos token
        self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained('./tok' )
        __UpperCamelCase =AutoTokenizer.from_pretrained('./tok' )
        self.assertTrue(tokenizer.is_fast )
        __UpperCamelCase =tokenizer.encode(
            A_ , )
        self.assertEqual(A_ , [31957, 250, 1345, 9, 10, 4758] )
| 62 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : str = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class _UpperCAmelCase ( snake_case_):
    """Configuration for TimeSformer video-classification models.

    Reconstructed from a machine-mangled original: the scrambler had collapsed
    every parameter to the duplicated name ``lowercase_`` (invalid Python) and
    every ``self.<attr>`` assignment to a dead local, so the class stored
    nothing. Parameter/attribute names below follow the one-to-one
    correspondence of the default values with the upstream
    ``TimesformerConfig`` — confirm against the library version in use.
    The base class ``snake_case_`` is presumably ``PretrainedConfig``
    (mangled import, outside this block) — TODO confirm.
    """

    # Key used by the HF auto-config machinery to identify this model family.
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Vision/patching geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        # TimeSformer-specific: how space/time attention is factorized.
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 356 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline
    (plain pipeline and DPM-Solver++ scheduler variants).

    NOTE(review): file appears machine-mangled — assignments bind the dead
    name ``snake_case_`` while later lines read ``sd_pipe`` / ``prompt`` /
    ``images`` etc.; also all three methods share the name ``_snake_case``,
    so later definitions silently shadow earlier ones (the tearDown helper
    is lost). Restore the original identifiers before running.
    """

    # Intended as tearDown: free VRAM between tests.
    def _snake_case ( self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    # SD2 bf16 pipeline sharded across devices must reproduce a reference slice.
    def _snake_case ( self : List[str] ):
        snake_case_, snake_case_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
        snake_case_ : Union[str, Any] = 'A painting of a squirrel eating a burger'
        snake_case_ : Tuple = jax.device_count()
        snake_case_ : Dict = num_samples * [prompt]
        snake_case_ : Tuple = sd_pipe.prepare_inputs(lowercase_ )
        snake_case_ : str = replicate(lowercase_ )
        snake_case_ : Any = shard(lowercase_ )
        snake_case_ : Optional[int] = jax.random.PRNGKey(0 )
        snake_case_ : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
        snake_case_ : Optional[Any] = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        snake_case_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case_ : str = images[0, 253:256, 253:256, -1]
        snake_case_ : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case_ : int = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    # Same pipeline with the DPM-Solver++ multistep scheduler swapped in.
    def _snake_case ( self : str ):
        snake_case_ : Optional[Any] = 'stabilityai/stable-diffusion-2'
        snake_case_, snake_case_ : Union[str, Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
        snake_case_, snake_case_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
            lowercase_ , scheduler=lowercase_ , revision='bf16' , dtype=jnp.bfloataa , )
        snake_case_ : List[Any] = scheduler_params
        snake_case_ : int = 'A painting of a squirrel eating a burger'
        snake_case_ : str = jax.device_count()
        snake_case_ : Union[str, Any] = num_samples * [prompt]
        snake_case_ : int = sd_pipe.prepare_inputs(lowercase_ )
        snake_case_ : List[str] = replicate(lowercase_ )
        snake_case_ : List[Any] = shard(lowercase_ )
        snake_case_ : int = jax.random.PRNGKey(0 )
        snake_case_ : Tuple = jax.random.split(lowercase_ , jax.device_count() )
        snake_case_ : int = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        snake_case_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        snake_case_ : List[str] = images[0, 253:256, 253:256, -1]
        snake_case_ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case_ : Optional[int] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 155 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class a__ ( snake_case ):
    """Configuration for the Autoformer time-series forecasting model.

    Reconstructed from a machine-mangled original: the scrambler had collapsed
    every parameter to the duplicated name ``lowercase`` (invalid Python), bound
    every attribute store to a dead local ``A__``, and renamed the
    ``_number_of_features`` property even though ``__init__`` still reads it.
    Parameter and attribute names below follow the values' one-to-one
    correspondence with the upstream ``AutoformerConfig`` — confirm against the
    library version in use. The base class ``snake_case`` is presumably
    ``PretrainedConfig`` (mangled import, outside this block) — TODO confirm.
    """

    model_type = 'autoformer'
    # Map generic HF config names onto Autoformer-specific ones.
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length = None,
        context_length = None,
        distribution_output = "student_t",
        loss = "nll",
        input_size = 1,
        lags_sequence = [1, 2, 3, 4, 5, 6, 7],  # upstream-compatible mutable default; never mutated here
        scaling = True,
        num_dynamic_real_features = 0,
        num_static_categorical_features = 0,
        num_static_real_features = 0,
        num_time_features = 0,
        cardinality = None,
        embedding_dimension = None,
        d_model = 64,
        encoder_attention_heads = 2,
        decoder_attention_heads = 2,
        encoder_layers = 2,
        decoder_layers = 2,
        encoder_ffn_dim = 32,
        decoder_ffn_dim = 32,
        activation_function = "gelu",
        dropout = 0.1,
        encoder_layerdrop = 0.1,
        decoder_layerdrop = 0.1,
        attention_dropout = 0.1,
        activation_dropout = 0.1,
        num_parallel_samples = 100,
        init_std = 0.02,
        use_cache = True,
        is_encoder_decoder = True,
        label_length = 10,
        moving_average = 25,
        autocorrelation_factor = 3,
        **kwargs,
    ):
        """Builds the config; raises ValueError when cardinality /
        embedding_dimension lengths disagree with
        num_static_categorical_features."""
        # Time-series specific fields.
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature, capped at 50.
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )

    # Backwards-compatible alias for the name the scrambled original exposed.
    UpperCamelCase = _number_of_features
| 68 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def a ( *_UpperCAmelCase : List[str] ):
'''simple docstring'''
with open(_UpperCAmelCase , '''r''' ) as fh:
fcntl.flock(_UpperCAmelCase , fcntl.LOCK_EX )
try:
print(*_UpperCAmelCase )
finally:
fcntl.flock(_UpperCAmelCase , fcntl.LOCK_UN )
# Reconstructed from the mangled original, where every assignment bound the
# dead name ``__A`` while the lines below read ``local_rank`` / ``device`` /
# ``gpu`` / ``rank`` / ``world_size`` (all undefined). Targets are derived
# from those reads. ``printflock`` is the lock-guarded print helper defined
# above.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
# Per-rank tag prefixed to every diagnostic line.
gpu = f'''[{hostname}-{local_rank}]'''
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
    dist.barrier()
    if rank == 0:
        printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
    printflock(f'''{gpu} is broken''')
    raise
| 226 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
    """Model tester that builds small ViTMSN configs/inputs and checks the
    base model and image-classification head output shapes.

    NOTE(review): file appears machine-mangled — every parameter in
    ``__init__`` (and the create_and_check methods) is named ``_a``, which is
    invalid Python (duplicate argument), and assignments bind dead ``_a``
    locals. Also note the two ``print('... {pixel_values.shape} ...')`` calls
    below are missing the ``f`` prefix, so they print literal braces.
    Original identifiers need restoring before this can run.
    """

    def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,):
        """Stores the hyper-parameters of the tiny test model."""
        _a : Any = parent
        _a : int = batch_size
        _a : List[Any] = image_size
        _a : Optional[int] = patch_size
        _a : List[str] = num_channels
        _a : Dict = is_training
        _a : Dict = use_labels
        _a : Optional[Any] = hidden_size
        _a : str = num_hidden_layers
        _a : Optional[int] = num_attention_heads
        _a : Dict = intermediate_size
        _a : Union[str, Any] = hidden_act
        _a : List[str] = hidden_dropout_prob
        _a : Any = attention_probs_dropout_prob
        _a : List[str] = type_sequence_label_size
        _a : int = initializer_range
        _a : List[Any] = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        _a : Union[str, Any] = (image_size // patch_size) ** 2
        _a : Tuple = num_patches + 1
    def __lowercase ( self : Any ):
        """Builds random pixel values (and labels when use_labels) plus a config."""
        _a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _a : str = None
        if self.use_labels:
            _a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        _a : List[str] = self.get_config()
        return config, pixel_values, labels
    def __lowercase ( self : Optional[int] ):
        """Returns a ViTMSNConfig populated from the tester's hyper-parameters."""
        return ViTMSNConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
    def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ):
        """Checks the base model's last_hidden_state shape."""
        _a : str = ViTMSNModel(config=_a )
        model.to(_a )
        model.eval()
        _a : int = model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ):
        """Checks the classification head's logits shape (RGB and greyscale)."""
        _a : Tuple = self.type_sequence_label_size
        _a : int = ViTMSNForImageClassification(_a )
        model.to(_a )
        model.eval()
        _a : Dict = model(_a ,labels=_a )
        # NOTE(review): missing f-prefix — prints literal braces instead of shapes.
        print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        # NOTE(review): missing f-prefix here as well.
        print('Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        _a : int = 1
        _a : Optional[Any] = ViTMSNForImageClassification(_a )
        model.to(_a )
        model.eval()
        _a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _a : Optional[int] = model(_a )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
    def __lowercase ( self : Any ):
        """Packs config and inputs into the dict format the common tests expect."""
        _a : Optional[int] = self.prepare_config_and_inputs()
        _a, _a, _a : int = config_and_inputs
        _a : List[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common ModelTester/Pipeline test harness wired up for ViTMSN.

    NOTE(review): machine-mangled — the two base mixins are both named
    ``lowercase__`` (presumably ModelTesterMixin and PipelineTesterMixin;
    confirm), the flag class attributes all share the name
    ``__UpperCAmelCase``, so only the last binding survives, and method
    bodies bind dead ``_a`` locals. Restore original identifiers to run.
    """

    __UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    __UpperCAmelCase : List[Any] = (
        {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : str = False
    __UpperCAmelCase : Optional[Any] = False
    __UpperCAmelCase : List[str] = False
    __UpperCAmelCase : int = False
    # Sets up the model tester and config tester.
    def __lowercase ( self : Optional[int] ):
        _a : List[str] = ViTMSNModelTester(self )
        _a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
    # Runs the shared config sanity checks.
    def __lowercase ( self : str ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMSN does not use inputs_embeds' )
    def __lowercase ( self : List[str] ):
        pass
    # Input embeddings module exists; output embeddings are absent or Linear.
    def __lowercase ( self : Union[str, Any] ):
        _a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : List[Any] = model_class(_a )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            _a : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
    # Forward signature must start with 'pixel_values'.
    def __lowercase ( self : Any ):
        _a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : List[str] = model_class(_a )
            _a : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _a : List[Any] = [*signature.parameters.keys()]
            _a : int = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,_a )
    # Delegates to the model tester's base-model shape check.
    def __lowercase ( self : List[str] ):
        _a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )
    # Delegates to the model tester's classification-head shape check.
    def __lowercase ( self : Optional[Any] ):
        _a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )
    # Loading each published checkpoint must succeed.
    @slow
    def __lowercase ( self : int ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _a : Dict = ViTMSNModel.from_pretrained(_a )
            self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
    """Load the standard COCO cats fixture image used by the integration tests.

    Bug fixed: the mangled original bound the opened image to a dead local
    (``_a``) and then returned the undefined name ``image``; bind it properly.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: facebook/vit-msn-small logits on the COCO cats
    image must match a stored reference slice.

    NOTE(review): machine-mangled — method bodies bind dead ``_a`` locals
    while later lines read ``model`` / ``inputs`` / ``outputs`` etc.; restore
    the original identifiers before running.
    """

    @cached_property
    def __lowercase ( self : Union[str, Any] ):
        # Image processor matching the checkpoint (None when vision extras absent).
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
    @slow
    def __lowercase ( self : Union[str, Any] ):
        # Seed for deterministic head initialization.
        torch.manual_seed(2 )
        _a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a )
        _a : List[str] = self.default_image_processor
        _a : int = prepare_img()
        _a : Tuple = image_processor(images=_a ,return_tensors='pt' ).to(_a )
        # forward pass
        with torch.no_grad():
            _a : Optional[int] = model(**_a )
        # verify the logits
        _a : Union[str, Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,_a )
        _a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 5 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
    """Scrape worldometers and return a mapping of statistic name -> value.

    Bugs fixed from the mangled original: the soup/keys/values results were
    bound to a dead local ``_a`` (leaving ``soup`` undefined), and the final
    comprehension zipped the URL string with itself instead of zipping the
    scraped key and value tags.

    :param __a: page URL to scrape (network I/O).
    :return: dict mapping each headline/panel title to its counter text.
    """
    soup = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
    # Main headline counters...
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    # ...plus the secondary panel statistics.
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}


# Backwards-compatible alias: the __main__ block below calls this name.
world_covidaa_stats = UpperCAmelCase_

if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
| 5 | 1 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
    """Build a ``SwinConfig`` matching the timm checkpoint named by
    ``__lowerCAmelCase`` (e.g. ``swin_tiny_patch4_window7_224``).

    Reconstructed from a machine-mangled original in which every assignment
    bound a dead local (so the function returned an unconfigured default
    SwinConfig). Attribute names (``embed_dim``, ``depths``, ``num_heads``,
    ``window_size``, ``image_size``, ``num_labels``, ``id2label``,
    ``label2id``) follow the upstream converter — confirm against the
    transformers version in use. Downloads the ImageNet label file from the
    Hub (network I/O).
    """
    swin_name = __lowerCAmelCase
    config = SwinConfig()
    # Name layout: swin_<size>_patch<p>_window<w>_<resolution>[_in22k]
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        # "large" (and any other size) falls through to these values.
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    # ImageNet-22k checkpoints have 21841 classes, otherwise ImageNet-1k.
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    # JSON keys arrive as strings; the config expects int class ids.
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def _lowerCAmelCase ( __lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
if "patch_embed.proj" in name:
snake_case__ : Optional[Any] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case__ : List[Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
snake_case__ : Dict = '''encoder.''' + name
if "attn.proj" in name:
snake_case__ : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
snake_case__ : Optional[int] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
snake_case__ : Optional[int] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case__ : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case__ : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case__ : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
snake_case__ : int = '''layernorm.weight'''
if name == "norm.bias":
snake_case__ : Union[str, Any] = '''layernorm.bias'''
if "head" in name:
snake_case__ : Any = name.replace('''head''' , '''classifier''' )
else:
snake_case__ : Optional[Any] = '''swin.''' + name
return name
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
    """Intended to split fused timm qkv weights/biases into the separate
    query/key/value entries of an HF Swin state dict.

    NOTE(review): irreparably machine-mangled as-is. The signature duplicates
    the parameter name ``__lowerCAmelCase`` (invalid Python; originally
    ``orig_state_dict, model``), and — worse — the destination dict keys of
    the q/k/v stores (originally f-strings like
    ``orig_state_dict[f"swin.encoder.layers.{layer_num}...query.weight"]``)
    were erased and replaced by dead ``snake_case__`` locals, so they cannot
    be reconstructed from this source alone; restore from the upstream
    converter.
    """
    for key in orig_state_dict.copy().keys():
        snake_case__ : Tuple = orig_state_dict.pop(__lowerCAmelCase )
        if "mask" in key:
            # Relative-position mask buffers are dropped entirely.
            continue
        elif "qkv" in key:
            snake_case__ : Optional[Any] = key.split('.' )
            snake_case__ : Union[str, Any] = int(key_split[1] )
            snake_case__ : Union[str, Any] = int(key_split[3] )
            # Per-head hidden size of the targeted attention block.
            snake_case__ : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # q / k / v rows of the fused weight matrix.
                snake_case__ : Optional[int] = val[:dim, :]
                snake_case__ : List[Any] = val[
                    dim : dim * 2, :
                ]
                snake_case__ : Union[str, Any] = val[-dim:, :]
            else:
                # q / k / v slices of the fused bias vector.
                snake_case__ : List[Any] = val[
                    :dim
                ]
                snake_case__ : Optional[int] = val[
                    dim : dim * 2
                ]
                snake_case__ : Tuple = val[
                    -dim:
                ]
        else:
            # Non-qkv entries keep their (renamed) key unchanged.
            snake_case__ : List[str] = val
    return orig_state_dict
def _lowerCAmelCase(swin_name, pytorch_dump_folder_path) -> None:
    """Convert a timm Swin checkpoint to HF format and save it to disk.

    BUGFIX: the obfuscated original declared both parameters as
    ``__lowerCAmelCase`` (duplicate argument name — a SyntaxError) and collapsed
    every local onto ``snake_case__``, so ``timm_model``/``model``/``inputs``/...
    were read while undefined.

    Args:
        swin_name: timm model identifier, e.g. ``swin_tiny_patch4_window7_224``.
        pytorch_dump_folder_path: directory to save the converted model into.
    """
    # Load the original timm model (pretrained weights) in eval mode.
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    # Build the equivalent HF model and load the re-packed weights.
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    # NOTE(review): `convert_state_dict` is the conversion helper defined above
    # in the upstream script; its def was renamed by the obfuscation — confirm.
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    # Sanity-check both models on the standard COCO sample image.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert one timm Swin checkpoint to HF format.
    import argparse  # local import: the file's own import block is not in view

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # BUGFIX: the obfuscated file bound the parser/args to `A__` and then called
    # `convert_swin_checkpoint`, a name never defined here; the conversion entry
    # point defined above is `_lowerCAmelCase`.
    _lowerCAmelCase(args.swin_name, args.pytorch_dump_folder_path)
| 230 |
import random
from .binary_exp_mod import bin_exp_mod
def _lowerCAmelCase(n, prec=1000) -> bool:
    """Probabilistic Miller–Rabin primality test.

    BUGFIX: the obfuscated original declared both parameters as
    ``__lowerCAmelCase`` (duplicate argument name — a SyntaxError), read locals
    (``d``, ``exp``, ``count``, ``b``, ``flag``) that were never bound, used
    float division ``d /= 2``, and carried a wrong ``-> str`` annotation.
    The project-local ``bin_exp_mod(a, d, n)`` is replaced by the equivalent
    built-in three-argument ``pow(a, d, n)``.

    Args:
        n: integer to test.
        prec: number of random witness rounds; error probability <= 4**-prec.

    Returns:
        True if ``n`` is (probably) prime, False if certainly composite.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int (was float `/=`)
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = pow(a, d, n)  # modular exponentiation, same as bin_exp_mod
        if b != 1:
            flag = True  # assume witness of compositeness until proven otherwise
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    # Read an upper bound from stdin and print every (probable) prime up to it.
    # BUGFIX: the obfuscated file bound the bound to `A__` but read `n`, and
    # called `is_prime_big`, a name never defined here — the primality test
    # above is `_lowerCAmelCase`.
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if _lowerCAmelCase(i)))
| 230 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
    """Fixture holder supplying constructor kwargs for MobileNetV1 image-processor tests.

    BUGFIX: the obfuscated original declared every ``__init__`` parameter as
    ``lowerCamelCase`` (duplicate argument names — a SyntaxError) and collapsed
    all attribute assignments onto ``_lowerCAmelCase``, discarding the values.
    Parameter names are restored from the assignment order and defaults.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # NOTE: TestCase.__init__ is deliberately not called — this class is a
        # plain fixture object, not a collected test case.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def A__ (self ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
    """Unit tests for MobileNetVaImageProcessor (resize + center-crop pipeline).

    NOTE(review): obfuscation damage — the mixin base ``__lowercase`` and the
    fixture class ``MobileNetVaImageProcessingTester`` are not defined under
    those names in this file, every method below is named ``A__`` (later defs
    shadow earlier ones, so only the last survives on the class), and method
    bodies read ``lowerCamelCase`` / ``_lowerCAmelCase`` placeholders that are
    never bound. Restoring the original names requires the upstream source;
    code is left byte-identical here.
    """

    __UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None

    def A__ (self ):
        '''Create the shared tester fixture (originally ``setUp``).'''
        _lowerCAmelCase = MobileNetVaImageProcessingTester(self )

    @property
    def A__ (self ):
        '''Kwargs dict for constructing the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ (self ):
        '''The processor exposes the expected configuration attributes.'''
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )

    def A__ (self ):
        '''``from_dict`` honors defaults and keyword overrides for size/crop_size.'''
        _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )

        _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )

    def A__ (self ):
        '''Intentionally empty (kept for interface parity with the base tests).'''
        pass

    def A__ (self ):
        '''PIL inputs: single image and batch produce NCHW tensors of crop size.'''
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )

        # Test not batched input
        _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

        # Test batched
        _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def A__ (self ):
        '''NumPy inputs: single array and batch produce NCHW tensors of crop size.'''
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )

        # Test not batched input
        _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

        # Test batched
        _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def A__ (self ):
        '''PyTorch inputs: single tensor and batch produce NCHW tensors of crop size.'''
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )

        # Test not batched input
        _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

        # Test batched
        _lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
# Module logger. BUGFIX: the obfuscation bound the logger only to
# `SCREAMING_SNAKE_CASE`, while every function body in this file logs via
# `logger`; the alias below restores that name without breaking the old one.
SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logger = SCREAMING_SNAKE_CASE
@dataclass
class __lowerCamelCase:
    """Model/tokenizer selection and freezing options for the seq2seq trainer.

    BUGFIX: in the obfuscated original every field was named ``__UpperCamelCase``
    with no type annotation (so the dataclass registered no fields at all) and
    defaults referenced an undefined ``__lowercase`` placeholder. Field names
    and types are restored from the metadata help strings — confirm against the
    upstream script. The "Whether tp freeze" help typo is also fixed.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class __lowerCamelCase:
    """Seq2seq data arguments: paths, task, sequence-length limits and dataset sizes.

    BUGFIX: in the obfuscated original every field was named ``__UpperCamelCase``
    with no type annotation (so the dataclass registered no fields) and some
    defaults referenced an undefined ``__lowercase`` placeholder. Field names
    and types are restored from the metadata help strings — confirm against the
    upstream script.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1_024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def __UpperCAmelCase(split, metrics, output_dir) -> None:
    """Log all metrics for one split and persist them as ``{split}_results.json``.

    BUGFIX: the obfuscated original declared all three parameters as
    ``snake_case_`` (duplicate argument name — a SyntaxError) while the body
    read ``split`` and ``metrics``; parameter names are restored from the body
    and the wrong ``-> Tuple`` annotation is corrected.

    Args:
        split: dataset split name ("train", "val" or "test").
        metrics: mapping of metric name to value.
        output_dir: directory the JSON file is written into.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    # save_json is the project-local serialization helper imported at file top.
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def __UpperCAmelCase ( ) -> Union[str, Any]:
    """Entry point of this legacy seq2seq fine-tuning script: parse arguments,
    build config/tokenizer/model/datasets, then run train / eval / predict and
    return the accumulated metrics dict.

    NOTE(review): obfuscation damage — every call site passes the bare name
    ``snake_case_``, which is never bound in this function or module, and every
    distinct local was collapsed onto ``_lowerCAmelCase``, so names such as
    ``parser``, ``training_args``, ``model_args``, ``data_args``, ``config``,
    ``tokenizer``, ``model``, ``trainer`` and ``all_metrics`` are undefined
    where they are read. Restoring the original locals requires the upstream
    source; code is left byte-identical here.
    """
    # Parse CLI args (or a single JSON config file) into the three dataclasses.
    _lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()

    check_output_dir(snake_case_ )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("""Training/evaluation parameters %s""" , snake_case_ )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _lowerCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )

    # Copy CLI-supplied dropout/layerdrop overrides onto the config if present.
    _lowerCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(snake_case_ , snake_case_ , snake_case_ ):
            assert hasattr(snake_case_ , snake_case_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )

    _lowerCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )

    # use task specific params
    use_task_specific_params(snake_case_ , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        _lowerCAmelCase = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(snake_case_ , snake_case_ ):
            _lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            _lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    # Optionally freeze embeddings and/or the whole encoder before training.
    if model_args.freeze_embeds:
        freeze_embeds(snake_case_ )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    _lowerCAmelCase = SeqaSeqDataset

    # Get datasets
    _lowerCAmelCase = (
        dataset_class(
            snake_case_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    _lowerCAmelCase = (
        dataset_class(
            snake_case_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    _lowerCAmelCase = (
        dataset_class(
            snake_case_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    _lowerCAmelCase = (
        build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
    )
    _lowerCAmelCase = SeqaSeqTrainer(
        model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
            snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )

    _lowerCAmelCase = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""" )
        _lowerCAmelCase = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        _lowerCAmelCase = train_result.metrics
        _lowerCAmelCase = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""" , snake_case_ , training_args.output_dir )
            all_metrics.update(snake_case_ )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        _lowerCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
        _lowerCAmelCase = data_args.n_val
        _lowerCAmelCase = round(metrics["""val_loss"""] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("""val""" , snake_case_ , training_args.output_dir )
            all_metrics.update(snake_case_ )

    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        _lowerCAmelCase = trainer.predict(test_dataset=snake_case_ , metric_key_prefix="""test""" )
        _lowerCAmelCase = test_output.metrics
        _lowerCAmelCase = data_args.n_test
        if trainer.is_world_process_zero():
            _lowerCAmelCase = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , snake_case_ , training_args.output_dir )
            all_metrics.update(snake_case_ )
            if training_args.predict_with_generate:
                _lowerCAmelCase = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
                _lowerCAmelCase = lmap(str.strip , snake_case_ )
                write_txt_file(snake_case_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
    if trainer.is_world_process_zero():
        save_json(snake_case_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
    return all_metrics
def __UpperCAmelCase ( snake_case_ :Any ) -> Dict:
    """Launcher-convention wrapper: ignores its argument and delegates to ``main()``.

    NOTE(review): ``main`` is never defined in this file — the main routine
    above was renamed ``__UpperCAmelCase`` by the obfuscation — so this call
    raises NameError; likewise the ``-> Dict`` annotation looks wrong (nothing
    is returned). Left byte-identical; confirm against the upstream script.
    """
    main()


if __name__ == "__main__":
    # Script entry point; same NameError caveat on `main` as above.
    main()
"""simple docstring"""
from typing import Any
def A(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> list:
    """Viterbi algorithm: most likely hidden-state path for an observation sequence.

    BUGFIX: the obfuscated original declared every parameter as ``snake_case``
    (duplicate argument names — a SyntaxError) and collapsed all assignment
    targets onto ``__UpperCamelCase``, leaving ``probabilities``, ``pointers``,
    ``arg_max``, ``max_probability`` etc. undefined where they were read. It
    also called ``_validation(...)``, a name not defined anywhere in this file
    (the validators below are all named ``A`` as well); the emptiness guard that
    call ultimately performed is inlined here.

    Args:
        observations_space: ordered observations (distinct labels per step).
        states_space: list of hidden-state labels.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> state -> transition probability.
        emission_probabilities: state -> observation -> emission probability.

    Returns:
        The most probable state sequence, one state per observation.
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")

    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
# NOTE(review): obfuscation damage — the signature repeats the parameter name
# `snake_case` (a SyntaxError), the body reads `__snake_case` which is never
# bound, and `_validate_not_empty` / `_validate_lists` / `_validate_dicts` are
# not defined under those names anywhere in this file (every helper def below
# is also named `A`). Left byte-identical; restoring needs the upstream source.
def A ( snake_case :Union[str, Any] , snake_case :str , snake_case :List[Any] , snake_case :Optional[Any] , snake_case :Tuple , ) -> None:
    """Validate all five Viterbi inputs by delegating to the specific validators."""
    _validate_not_empty(
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
    _validate_lists(__snake_case , __snake_case )
    _validate_dicts(
        __snake_case , __snake_case , __snake_case )
def A(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Raise ValueError if any of the five Viterbi inputs is empty/falsy.

    BUGFIX: the obfuscated original declared every parameter as ``snake_case``
    (duplicate argument names — a SyntaxError) while the body read the five
    descriptive names; the signature is restored from the body.
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
# NOTE(review): obfuscation damage — duplicate `snake_case` parameters (a
# SyntaxError), the body reads the unbound `__snake_case`, and `_validate_list`
# is not defined under that name in this file (its def is also named `A`).
# Left byte-identical; restoring needs the upstream source.
def A ( snake_case :int , snake_case :int ) -> None:
    """Validate the two list inputs (observations and states) as lists of strings."""
    _validate_list(__snake_case , 'observations_space' )
    _validate_list(__snake_case , 'states_space' )
def A(_object, var_name) -> None:
    """Raise ValueError unless ``_object`` is a list of strings.

    BUGFIX: the obfuscated original declared both parameters as ``snake_case``
    (duplicate argument names — a SyntaxError) and passed the unbound
    ``__snake_case`` placeholder to ``isinstance`` where a type was expected;
    the ``list``/``str`` checks are restored from the error messages.

    Args:
        _object: value to validate.
        var_name: name used in the error message.
    """
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    for element in _object:
        if not isinstance(element, str):
            raise ValueError(f"{var_name} must be a list of strings")
# NOTE(review): obfuscation damage — duplicate `snake_case` parameters (a
# SyntaxError), the body reads the unbound `__snake_case`, and the helpers
# `_validate_dict` / `_validate_nested_dict` are not defined under those names
# in this file (their defs are also named `A`). Left byte-identical.
def A ( snake_case :List[Any] , snake_case :Any , snake_case :Dict , ) -> None:
    """Validate the three probability tables (initial, transition, emission)."""
    _validate_dict(__snake_case , 'initial_probabilities' , __snake_case )
    _validate_nested_dict(__snake_case , 'transition_probabilities' )
    _validate_nested_dict(__snake_case , 'emission_probabilities' )
# NOTE(review): obfuscation damage — duplicate `snake_case` parameters (a
# SyntaxError), the body reads `_object` and `__snake_case` which are never
# bound, and `_validate_dict` is not defined under that name in this file
# (its def is also named `A`). Left byte-identical.
def A ( snake_case :List[str] , snake_case :int ) -> None:
    """Validate a dict-of-dicts probability table (outer and all inner dicts)."""
    _validate_dict(_object , __snake_case , __snake_case )
    for x in _object.values():
        _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def A(_object, var_name, value_type, nested=False) -> None:
    """Raise ValueError unless ``_object`` is a dict mapping strings to ``value_type``.

    BUGFIX: the obfuscated original declared every parameter as ``snake_case``
    (duplicate argument names — a SyntaxError) while the body read ``_object``,
    ``nested`` and the value type; the signature is restored from the body.

    Args:
        _object: value to validate.
        var_name: name used in the error messages.
        value_type: required type of every dict value.
        nested: if True, error text says "nested dictionary".
    """
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 316 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# BUGFIX: the obfuscation bound all six module constants to the single name
# `_a`, each assignment overwriting the previous one, while the tokenizer class
# below references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / FAIRSEQ_LANGUAGE_CODES — names that
# were never defined. Restored canonical names; `_a` keeps its final value for
# backward compatibility.
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on

# Backward compatibility: the obfuscated file's `_a` ended up bound to the
# language-code table; keep that final binding.
_a = FAIRSEQ_LANGUAGE_CODES
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="m2m100" , __lowerCAmelCase = None , __lowerCAmelCase=8 , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ = language_codes
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCamelCase__ = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
lowerCamelCase__ = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowerCAmelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = load_json(__lowerCAmelCase )
lowerCamelCase__ = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ = spm_file
lowerCamelCase__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs )
lowerCamelCase__ = len(self.encoder )
lowerCamelCase__ = {
self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )
}
lowerCamelCase__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )}
lowerCamelCase__ = {v: k for k, v in self.lang_token_to_id.items()}
lowerCamelCase__ = src_lang if src_lang is not None else '''en'''
lowerCamelCase__ = tgt_lang
lowerCamelCase__ = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowerCamelCase__ = num_madeup_words
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
lowerCamelCase__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
lowerCamelCase__ = [1] * len(self.prefix_tokens )
lowerCamelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase__ = {}
lowerCamelCase__ = load_spm(self.spm_file , self.sp_model_kwargs )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
lowerCamelCase__ = Path(__lowerCAmelCase )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
lowerCamelCase__ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowerCamelCase__ = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (str(__lowerCAmelCase ), str(__lowerCAmelCase ))
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = "en" , __lowerCAmelCase = None , __lowerCAmelCase = "ro" , **__lowerCAmelCase , ):
    '''Prepare a seq2seq batch: record the language pair, switch to source-language
    special tokens, then delegate to the parent implementation.

    NOTE(review): all five parameters share one mangled name (a SyntaxError) and
    the body reads `src_lang`/`tgt_lang`, which are never bound; the two
    assignments target a throwaway local instead of `self.src_lang` /
    `self.tgt_lang`. The original signature was presumably
    (src_texts, src_lang="en", tgt_texts=None, tgt_lang="ro", **kwargs).
    '''
    lowerCamelCase__ = src_lang
    lowerCamelCase__ = tgt_lang
    self.set_src_lang_special_tokens(self.src_lang )
    return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
    '''Tokenize raw text for translation and attach the target-language id.

    NOTE(review): the parameters share one mangled name (a SyntaxError) and the
    body reads `src_lang`/`tgt_lang`/`tgt_lang_id`/`inputs`, none of which is
    bound — the assignments were collapsed onto a throwaway local. The original
    presumably bound `inputs = self(...)`, `tgt_lang_id = self.get_lang_id(tgt_lang)`
    and stored the id on `inputs` before returning.
    '''
    if src_lang is None or tgt_lang is None:
        raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
    lowerCamelCase__ = src_lang
    lowerCamelCase__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
    lowerCamelCase__ = self.get_lang_id(__lowerCAmelCase )
    lowerCamelCase__ = tgt_lang_id
    return inputs
def __lowerCamelCase ( self ):
    '''Switch the tokenizer into input (source) mode before encoding source text.'''
    self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self ):
    '''Switch the tokenizer into target mode before encoding target text.'''
    self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self , __lowerCAmelCase ):
    '''Set prefix/suffix special tokens for the given *source* language.

    NOTE(review): all four assignments target a throwaway local; `lang_token` is
    read on the second line without being bound. The targets were presumably
    `lang_token`, `self.cur_lang_id`, `self.prefix_tokens` and `self.suffix_tokens`.
    '''
    lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase )
    lowerCamelCase__ = self.lang_token_to_id[lang_token]
    lowerCamelCase__ = [self.cur_lang_id]
    lowerCamelCase__ = [self.eos_token_id]
def __lowerCamelCase ( self , __lowerCAmelCase ):
    '''Set prefix/suffix special tokens for the given *target* language.

    NOTE(review): same mangling damage as the source-language variant above —
    the assignments never bind `lang_token` or the `self.*` attributes they
    were meant to update.
    '''
    lowerCamelCase__ = self.get_lang_token(__lowerCAmelCase )
    lowerCamelCase__ = self.lang_token_to_id[lang_token]
    lowerCamelCase__ = [self.cur_lang_id]
    lowerCamelCase__ = [self.eos_token_id]
def __lowerCamelCase ( self , lang ):
    '''Return the special language token string for a language code.

    Restores the parameter name `lang` that the original body already read
    (the mangled signature left it undefined).
    '''
    return self.lang_code_to_token[lang]
def __lowerCamelCase ( self , lang ):
    '''Return the vocabulary id of the special language token for `lang`.

    Restores the `lang` parameter name and binds `lang_token`, which the
    original read without ever assigning (the binding was collapsed onto a
    throwaway local).
    '''
    lang_token = self.get_lang_token(lang )
    return self.lang_token_to_id[lang_token]
def lowerCAmelCase__(path ,sp_model_kwargs ) -> sentencepiece.SentencePieceProcessor:
    '''Load a SentencePiece model from `path`, forwarding `sp_model_kwargs` to the processor.

    Fixes the duplicated (mangled) parameter names and binds the processor to the
    `spm` local that the original body read without defining.
    '''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def lowerCAmelCase__(path ) -> Union[Dict, List]:
    '''Load and return the JSON content of the file at `path`.

    Fixes the original, which passed the *path* instead of the open file handle
    to `json.load`.
    '''
    with open(path ,'''r''' ) as f:
        return json.load(f )
def lowerCAmelCase__(data ,path ) -> None:
    '''Serialize `data` as pretty-printed JSON to the file at `path`.

    Fixes the duplicated (mangled) parameter names and the call that dumped the
    object onto itself instead of into the open file handle.
    '''
    with open(path ,'''w''' ) as f:
        json.dump(data ,f ,indent=2 )
| 209 | 0 |
"""simple docstring"""
from math import factorial
def _lowerCamelCase(n , k ) -> int:
    '''Return C(n, k) = n! // (k! * (n - k)!), the number of k-combinations of n items.

    Restores the parameter names `n` and `k` that the body already read — the
    mangled signature declared two parameters with the same name (a SyntaxError).

    Raises ValueError when k < 0 or n < k.
    '''
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # Demo output. The original called an undefined name `combinations`
    # (NameError); it now calls the combination function defined above.
    print(
        'The number of five-card hands possible from a standard',
        F"""fifty-two card deck is: {_lowerCamelCase(52, 5)}\n""",
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        F"""4 for group projects, there are {_lowerCamelCase(40, 4)} ways""",
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        F"""are {_lowerCamelCase(10, 3)} ways that first, second and""",
        'third place can be awarded.',
    )
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
# SentencePiece marker for word-initial pieces.
__A = '▁'
# File names expected inside a saved tokenizer directory.
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
# Remote locations of the pretrained vocabulary files.
# NOTE(review): all four constants are bound to the same mangled name `__A`,
# so each assignment shadows the previous one.
__A = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}
# Maximum input length (in tokens) for the pretrained checkpoint.
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
    '''BARTpho (syllable-level) tokenizer: a SentencePiece model restricted to a
    reduced monolingual vocabulary read from `dict.txt`, with RoBERTa-style
    special tokens.

    NOTE(review): identifier mangling left duplicated parameter names in
    `__init__` and other defs (a SyntaxError) and collapsed many local bindings
    onto `_lowerCAmelCase`, so names such as `mask_token`, `sp_model_kwargs`,
    `state` and `out_string` are read without being bound — restore from the
    upstream implementation.
    '''
    lowerCamelCase = VOCAB_FILES_NAMES
    lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase = ['''input_ids''', '''attention_mask''']
    # Build the SentencePiece processor and the reduced fairseq-style vocab maps.
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
        _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
        _lowerCAmelCase =vocab_file
        _lowerCAmelCase =monolingual_vocab_file
        _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__UpperCAmelCase ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        _lowerCAmelCase ={}
        _lowerCAmelCase =0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
                _lowerCAmelCase =cnt
                cnt += 1
        with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
            for line in f.readlines():
                _lowerCAmelCase =line.strip().split()[0]
                _lowerCAmelCase =len(self.fairseq_tokens_to_ids )
                if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
                    _lowerCAmelCase =len(self.fairseq_tokens_to_ids )
        _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    # Pickle support: drop the unpicklable SentencePiece processor but keep its
    # serialized proto so __setstate__ can rebuild it.
    def __getstate__( self ) -> Dict:
        _lowerCAmelCase =self.__dict__.copy()
        _lowerCAmelCase =None
        _lowerCAmelCase =self.sp_model.serialized_model_proto()
        return state
    # Unpickle support: restore the dict and reload the processor from the proto.
    def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
        _lowerCAmelCase =d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            _lowerCAmelCase ={}
        _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    # Add <s> ... </s> (and pair) special tokens around the sequence(s).
    def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _lowerCAmelCase =[self.cls_token_id]
        _lowerCAmelCase =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # 1 marks special tokens, 0 marks sequence tokens.
    def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCAmelCase )) + [1]
        return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
    # BARTpho does not use token type ids: everything is segment 0.
    def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
        _lowerCAmelCase =[self.sep_token_id]
        _lowerCAmelCase =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Size of the reduced (monolingual) vocabulary.
    @property
    def _lowerCAmelCase ( self ) -> Union[str, Any]:
        return len(self.fairseq_ids_to_tokens )
    # Full token -> id map including tokens added after loading.
    def _lowerCAmelCase ( self ) -> List[Any]:
        _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # SentencePiece tokenization of raw text into subword pieces.
    def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
        return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
    # Token string -> id (unknown tokens map to unk_token_id).
    def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    # Id -> token string via the reduced vocab.
    def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
        return self.fairseq_ids_to_tokens[index]
    # Join subword pieces back into surface text.
    def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
        _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
        return out_string
    # Write the SentencePiece model and monolingual vocab file to `save_directory`.
    def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _lowerCAmelCase =os.path.join(
            __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        _lowerCAmelCase =os.path.join(
            __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCAmelCase , """wb""" ) as fi:
                _lowerCAmelCase =self.sp_model.serialized_model_proto()
                fi.write(__UpperCAmelCase )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(__UpperCAmelCase )} \n''' )
        return out_vocab_file, out_monolingual_vocab_file
| 341 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class A ( _UpperCAmelCase ):
    """Feature-extraction pipeline: returns the model's hidden states for input text.

    NOTE(review): identifier mangling gave `_sanitize_parameters` three identically
    named parameters (a SyntaxError) and its body reads `tokenize_kwargs`,
    `truncation` and `return_tensors`, which are never bound.
    """
    def snake_case__ ( self : int,lowercase_ : Dict=None,lowercase_ : Tuple=None,lowercase_ : List[Any]=None,**lowercase_ : Any )-> Optional[Any]:
        '''Split call kwargs into preprocess / forward / postprocess parameter dicts.'''
        if tokenize_kwargs is None:
            A__ = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            A__ = truncation
        A__ = tokenize_kwargs
        A__ = {}
        if return_tensors is not None:
            A__ = return_tensors
        return preprocess_params, {}, postprocess_params
    def snake_case__ ( self : Dict,lowercase_ : List[Any],**lowercase_ : Tuple )-> Dict[str, GenericTensor]:
        '''Tokenize the raw text into tensors for the configured framework.'''
        A__ = self.framework
        A__ = self.tokenizer(lowercase_,return_tensors=lowercase_,**lowercase_ )
        return model_inputs
    def snake_case__ ( self : Tuple,lowercase_ : int )-> Optional[Any]:
        '''Run the model forward pass on the tokenized inputs.'''
        A__ = self.model(**lowercase_ )
        return model_outputs
    def snake_case__ ( self : Tuple,lowercase_ : Tuple,lowercase_ : List[str]=False )-> Any:
        '''Return raw tensors, or nested Python lists when return_tensors is falsy.'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self : List[Any],*lowercase_ : int,**lowercase_ : Optional[Any] )-> int:
        '''Extract features for the given text(s) via the base Pipeline machinery.'''
        return super().__call__(*lowercase_,**lowercase_ )
| 7 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 68 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> remote config URL.
# NOTE(review): both constants are bound to the same mangled name `lowercase`,
# so the second assignment shadows the logger.
lowercase : Optional[int] = {
    '''snap-research/efficientformer-l1-300''': (
        '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
    ),
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    '''Configuration holding the hyper-parameters of an EfficientFormer model.

    NOTE(review): every `__init__` parameter shares the same mangled name (a
    SyntaxError) and the body reads descriptive names (`hidden_act`, `depths`,
    `dim`, ...) that are never bound; the assignments also target a throwaway
    local instead of `self.*` — the original keyword signature and attribute
    stores must be restored.
    '''
    A : Dict = 'efficientformer'
    def __init__( self , _SCREAMING_SNAKE_CASE = [3, 2, 6, 4] , _SCREAMING_SNAKE_CASE = [48, 96, 224, 448] , _SCREAMING_SNAKE_CASE = [True, True, True, True] , _SCREAMING_SNAKE_CASE = 448 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 7 , _SCREAMING_SNAKE_CASE = 5 , _SCREAMING_SNAKE_CASE = 8 , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1e-5 , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = 1e-12 , _SCREAMING_SNAKE_CASE = 224 , _SCREAMING_SNAKE_CASE = 1e-05 , **_SCREAMING_SNAKE_CASE , ) -> None:
        super().__init__(**_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : List[str] = hidden_sizes
        snake_case_ : List[Any] = num_hidden_layers
        snake_case_ : str = num_attention_heads
        snake_case_ : Any = initializer_range
        snake_case_ : int = layer_norm_eps
        snake_case_ : Dict = patch_size
        snake_case_ : int = num_channels
        snake_case_ : List[str] = depths
        snake_case_ : Union[str, Any] = mlp_expansion_ratio
        snake_case_ : Optional[Any] = downsamples
        snake_case_ : Tuple = dim
        snake_case_ : List[Any] = key_dim
        snake_case_ : Dict = attention_ratio
        snake_case_ : Tuple = resolution
        snake_case_ : Union[str, Any] = pool_size
        snake_case_ : Any = downsample_patch_size
        snake_case_ : Tuple = downsample_stride
        snake_case_ : List[Any] = downsample_pad
        snake_case_ : Optional[Any] = drop_path_rate
        snake_case_ : Dict = num_metaad_blocks
        snake_case_ : List[Any] = distillation
        snake_case_ : Optional[int] = use_layer_scale
        snake_case_ : Union[str, Any] = layer_scale_init_value
        snake_case_ : List[Any] = image_size
        snake_case_ : str = batch_norm_eps
| 36 |
# Mapping of dependency name -> pip requirement specifier (the `deps` table of a
# setup script; version pins are meaningful and must not be reformatted).
lowercase : Optional[int] = {
    '''Pillow''': '''Pillow''',
    '''accelerate''': '''accelerate>=0.11.0''',
    '''compel''': '''compel==0.1.8''',
    '''black''': '''black~=23.1''',
    '''datasets''': '''datasets''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.13.2''',
    '''requests-mock''': '''requests-mock==1.10.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''invisible-watermark''': '''invisible-watermark''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2''',
    '''jaxlib''': '''jaxlib>=0.1.65''',
    '''Jinja2''': '''Jinja2''',
    '''k-diffusion''': '''k-diffusion>=0.0.12''',
    '''torchsde''': '''torchsde''',
    '''note_seq''': '''note_seq''',
    '''librosa''': '''librosa''',
    '''numpy''': '''numpy''',
    '''omegaconf''': '''omegaconf''',
    '''parameterized''': '''parameterized''',
    '''protobuf''': '''protobuf>=3.20.3,<4''',
    '''pytest''': '''pytest''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''ruff''': '''ruff>=0.0.241''',
    '''safetensors''': '''safetensors''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''scipy''': '''scipy''',
    '''onnx''': '''onnx''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''tensorboard''': '''tensorboard''',
    '''torch''': '''torch>=1.4''',
    '''torchvision''': '''torchvision''',
    '''transformers''': '''transformers>=4.25.1''',
    '''urllib3''': '''urllib3<=2.0.0''',
}
| 36 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__a : str = TypeVar("""T""")
def UpperCAmelCase ( lowercase ):
    """Return the heap-array index of the parent of slot *lowercase*."""
    parent_index = (lowercase - 1) // 2
    return parent_index
def UpperCAmelCase ( lowercase ):
    """Return the heap-array index of the left child of slot *lowercase*."""
    left_index = 2 * lowercase + 1
    return left_index
def UpperCAmelCase ( lowercase ):
    """Return the heap-array index of the right child of slot *lowercase*."""
    right_index = 2 * lowercase + 2
    return right_index
class _UpperCamelCase ( Generic[T] ):
    """Min-priority queue on a binary heap with a position map for O(log n) key updates.

    NOTE(review): `push`/`update_key`/`_swap_nodes` declare duplicated parameter
    names (a SyntaxError) and many locals were collapsed onto `__lowercase`, so
    names later read (`elem`, `weight`, `curr_pos`, `position`, ...) are unbound.
    """
    def __init__( self ) -> None:
        '''Create an empty queue: heap list, elem -> index map, element count.'''
        __lowercase = []
        __lowercase = {}
        __lowercase = 0
    def __len__( self ) -> int:
        '''Number of elements currently stored.'''
        return self.elements
    def __repr__( self ) -> str:
        '''Debug representation: the raw heap list.'''
        return str(self.heap )
    def _SCREAMING_SNAKE_CASE ( self ) -> bool:
        '''True when the queue holds no elements.'''
        return self.elements == 0
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        '''Insert an (elem, weight) pair and restore the heap property upwards.'''
        self.heap.append((elem, weight) )
        __lowercase = self.elements
        self.elements += 1
        self._bubble_up(lowerCAmelCase__ )
    def _SCREAMING_SNAKE_CASE ( self ) -> T:
        '''Remove and return the element with the smallest weight.'''
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        __lowercase , __lowercase = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            __lowercase , __lowercase = self.heap[0]
            self._bubble_down(lowerCAmelCase__ )
        return elem
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        '''Change an element's weight, then re-heapify in whichever direction is needed.'''
        __lowercase = self.position_map[elem]
        __lowercase = (elem, weight)
        if position > 0:
            __lowercase = get_parent_position(lowerCAmelCase__ )
            __lowercase , __lowercase = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(lowerCAmelCase__ )
            else:
                self._bubble_down(lowerCAmelCase__ )
        else:
            self._bubble_down(lowerCAmelCase__ )
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> None:
        '''Sift an element up while it is lighter than its parent.'''
        __lowercase = self.position_map[elem]
        if curr_pos == 0:
            return None
        __lowercase = get_parent_position(lowerCAmelCase__ )
        __lowercase , __lowercase = self.heap[curr_pos]
        __lowercase , __lowercase = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(lowerCAmelCase__ , lowerCAmelCase__ )
            return self._bubble_up(lowerCAmelCase__ )
        return None
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> None:
        '''Sift an element down below any lighter child (right child preferred when lighter).'''
        __lowercase = self.position_map[elem]
        __lowercase , __lowercase = self.heap[curr_pos]
        __lowercase = get_child_left_position(lowerCAmelCase__ )
        __lowercase = get_child_right_position(lowerCAmelCase__ )
        if child_left_position < self.elements and child_right_position < self.elements:
            __lowercase , __lowercase = self.heap[child_left_position]
            __lowercase , __lowercase = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(lowerCAmelCase__ , lowerCAmelCase__ )
                return self._bubble_down(lowerCAmelCase__ )
        if child_left_position < self.elements:
            __lowercase , __lowercase = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(lowerCAmelCase__ , lowerCAmelCase__ )
                return self._bubble_down(lowerCAmelCase__ )
        else:
            return None
        if child_right_position < self.elements:
            __lowercase , __lowercase = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(lowerCAmelCase__ , lowerCAmelCase__ )
                return self._bubble_down(lowerCAmelCase__ )
        return None
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        '''Swap two heap slots and keep the position map consistent.'''
        __lowercase = self.heap[nodea_pos][0]
        __lowercase = self.heap[nodea_pos][0]
        __lowercase , __lowercase = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        __lowercase = nodea_pos
        __lowercase = nodea_pos
class _UpperCamelCase ( Generic[T] ):
    """Undirected weighted graph stored as an adjacency map {node: {neighbour: weight}}.

    NOTE(review): `add_edge` declares three identically named parameters (a
    SyntaxError) and assigns the edge weights to a throwaway local — the
    adjacency-map updates were lost to identifier mangling.
    """
    def __init__( self ) -> None:
        '''Empty graph: adjacency map and node count.'''
        __lowercase = {}
        __lowercase = 0
    def __repr__( self ) -> str:
        '''Debug representation: the adjacency map.'''
        return str(self.connections )
    def __len__( self ) -> int:
        '''Number of nodes in the graph.'''
        return self.nodes
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> None:
        '''Add a node (no-op when it is already present).'''
        if node not in self.connections:
            __lowercase = {}
            self.nodes += 1
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        '''Add an undirected weighted edge between two nodes, creating them if needed.'''
        self.add_node(lowerCAmelCase__ )
        self.add_node(lowerCAmelCase__ )
        __lowercase = weight
        __lowercase = weight
def UpperCAmelCase ( graph , ):
    """Run Prim's minimum-spanning-tree algorithm over `graph`.

    Returns `(dist, parent)`: for each node, the weight of the edge connecting it
    to the growing tree and its parent in the MST (None for the root).

    Fixes: restores the parameter name `graph` and the distinct locals
    (`dist`, `parent`, `priority_queue`, `node`) that the body read but never
    bound (every assignment had been collapsed onto one throwaway local), and
    removes stray non-Python residue that had been fused onto the final
    `return` line, which made the function a syntax error.

    NOTE(review): `MinPriorityQueue` is referenced here but the queue class
    defined above carries a mangled name — confirm the intended class name.
    """
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}
    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization: pull the first node and relax its neighbours
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : List[Any] = BertJapaneseTokenizer
__a : Any = False
__a : Optional[int] = True
    def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        '''Write a small Japanese WordPiece vocabulary file into the test temp dir.

        NOTE(review): the list/path are assigned to a throwaway local while the
        write below reads `vocab_tokens` and `self.vocab_file` — mangling damage.
        '''
        super().setUp()
        __lowercase = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''こんにちは''',
            '''こん''',
            '''にちは''',
            '''ばんは''',
            '''##こん''',
            '''##にちは''',
            '''##ばんは''',
            '''世界''',
            '''##世界''',
            '''、''',
            '''##、''',
            '''。''',
            '''##。''',
        ]
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> str:
        '''Return the (raw input, expected detokenized output) text pair used by tests.'''
        __lowercase = '''こんにちは、世界。 \nこんばんは、世界。'''
        __lowercase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text
    def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Dict:
        '''Encode then decode the sample pair with the given tokenizer; return (text, ids).

        NOTE(review): results are assigned to throwaway locals while the body reads
        `tokenizer`, `text` and `ids` — mangling damage.
        '''
        __lowercase , __lowercase = self.get_input_output_texts(lowerCAmelCase__ )
        __lowercase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        __lowercase = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
        return text, ids
    def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''Common-tokenizer test intentionally skipped for this tokenizer.'''
        pass # TODO add if relevant
    def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        '''Common-tokenizer test intentionally skipped for this tokenizer.'''
        pass # TODO add if relevant
    def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        '''Common-tokenizer test intentionally skipped for this tokenizer.'''
        pass # TODO add if relevant
    def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        '''Tokenize a sample with the default word tokenizer; check tokens and ids.'''
        __lowercase = self.tokenizer_class(self.vocab_file )
        __lowercase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(lowerCAmelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        '''Check the MeCab-based tokenizer round-trips through pickle unchanged.'''
        __lowercase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
        self.assertIsNotNone(lowerCAmelCase__ )
        __lowercase = '''こんにちは、世界。\nこんばんは、世界。'''
        __lowercase = tokenizer.tokenize(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        __lowercase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(lowerCAmelCase__ , '''wb''' ) as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
        with open(lowerCAmelCase__ , '''rb''' ) as handle:
            __lowercase = pickle.load(lowerCAmelCase__ )
        __lowercase = tokenizer_new.tokenize(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
        '''MeCab tokenizer with the ipadic dictionary: expected surface segmentation.'''
        __lowercase = MecabTokenizer(mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def _SCREAMING_SNAKE_CASE ( self ) -> int:
        '''MeCab tokenizer with unidic_lite (skipped when the dictionary is absent).'''
        try:
            __lowercase = MecabTokenizer(mecab_dic='''unidic_lite''' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''MeCab tokenizer with the full unidic dictionary (skipped when absent).'''
        try:
            __lowercase = MecabTokenizer(mecab_dic='''unidic''' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        '''MeCab tokenizer with do_lower_case: Latin text is lowercased.'''
        __lowercase = MecabTokenizer(do_lower_case=lowerCAmelCase__ , mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
    def _SCREAMING_SNAKE_CASE ( self ) -> str:
        '''MeCab tokenizer pointed at the juman dictionary via mecab_option (skipped if absent).'''
        try:
            __lowercase = MecabTokenizer(
                do_lower_case=lowerCAmelCase__ , normalize_text=lowerCAmelCase__ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
    def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''MeCab tokenizer without text normalization keeps the ideographic space.'''
        __lowercase = MecabTokenizer(normalize_text=lowerCAmelCase__ , mecab_dic='''ipadic''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
        '''Check the Sudachi-based tokenizer round-trips through pickle unchanged.'''
        __lowercase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
        self.assertIsNotNone(lowerCAmelCase__ )
        __lowercase = '''こんにちは、世界。\nこんばんは、世界。'''
        __lowercase = tokenizer.tokenize(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        __lowercase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
        with open(lowerCAmelCase__ , '''wb''' ) as handle:
            pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
        with open(lowerCAmelCase__ , '''rb''' ) as handle:
            __lowercase = pickle.load(lowerCAmelCase__ )
        __lowercase = tokenizer_new.tokenize(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        '''Sudachi core-dictionary tokenization keeps whitespace tokens.'''
        __lowercase = SudachiTokenizer(sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        '''Sudachi split mode A yields the shortest word units.'''
        __lowercase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> str:
        '''Sudachi split mode B yields intermediate word units.'''
        __lowercase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> int:
        '''Sudachi split mode C yields the longest (named-entity) units.'''
        __lowercase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
        self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''Sudachi tokenizer with do_lower_case: Latin text is lowercased.'''
        __lowercase = SudachiTokenizer(do_lower_case=lowerCAmelCase__ , sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''Sudachi tokenizer without normalization keeps the ideographic space.'''
        __lowercase = SudachiTokenizer(normalize_text=lowerCAmelCase__ , sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
    @require_sudachi
    def _SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''Sudachi tokenizer with trim_whitespace drops whitespace tokens.'''
        __lowercase = SudachiTokenizer(trim_whitespace=lowerCAmelCase__ , sudachi_dict_type='''core''' )
        self.assertListEqual(
            tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """A JumanPP-based tokenizer must tokenize correctly and survive pickling."""
    # Fix: the original assigned every intermediate to a throwaway name and
    # referenced an undefined obfuscated placeholder; bindings reconstructed
    # from the use sites.
    tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
    self.assertIsNotNone(tokenizer )
    text = '''こんにちは、世界。\nこんばんは、世界。'''
    tokens = tokenizer.tokenize(text )
    self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
    filename = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
    with open(filename , '''wb''' ) as handle:
        pickle.dump(tokenizer , handle )
    with open(filename , '''rb''' ) as handle:
        tokenizer_new = pickle.load(handle )
    tokens_loaded = tokenizer_new.tokenize(text )
    # The reloaded tokenizer must produce the exact same tokens.
    self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """Default JumanppTokenizer keeps ideographic spaces as '''\\u3000''' tokens."""
    # Fix: bind the tokenizer under the name the assertion actually uses.
    tokenizer = JumanppTokenizer()
    self.assertListEqual(
        tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """Lower-casing folds Latin text while Japanese tokens are unchanged."""
    # do_lower_case=True is implied by the lower-cased '''iphone''' expectation.
    tokenizer = JumanppTokenizer(do_lower_case=True )
    self.assertListEqual(
        tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """Without normalization, half-width katakana stays decomposed."""
    # normalize_text=False is implied by the decomposed ア ッ フ ゚ expectation
    # (NFKC would compose them into full-width アップ).
    tokenizer = JumanppTokenizer(normalize_text=False )
    self.assertListEqual(
        tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """Trimming whitespace must drop every space token."""
    # trim_whitespace=True is implied by the whitespace-free expectation.
    tokenizer = JumanppTokenizer(trim_whitespace=True )
    self.assertListEqual(
        tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """An ASCII emoticon must be kept together as a single token."""
    # Fix: bind the tokenizer under the name the assertion actually uses.
    tokenizer = JumanppTokenizer()
    self.assertListEqual(
        tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """WordpieceTokenizer splits text into sub-word pieces from a toy vocab."""
    # Fix: build the vocab dict explicitly; the original wrote every
    # intermediate to the same throwaway name and then referenced undefined
    # placeholders.
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
    vocab = {}
    for i, token in enumerate(vocab_tokens ):
        vocab[token] = i
    tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
    self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
    self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
    self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
    self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """The sentencepiece-backed subword tokenizer splits on the ▁ marker."""
    # Fix: reconstructed bindings — the original assigned to throwaway names
    # and asserted on undefined placeholders.
    tokenizer = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
    subword_tokenizer = tokenizer.subword_tokenizer
    tokens = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
    self.assertListEqual(tokens , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
    tokens = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
    self.assertListEqual(tokens , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def _SCREAMING_SNAKE_CASE ( self ) -> None:
    """build_inputs_with_special_tokens wraps sequences in [CLS]/[SEP]."""
    tokenizer = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
    # add_special_tokens=False is implied by the manual [2]/[3] wrapping in
    # the assertions below; the original passed an undefined placeholder.
    text = tokenizer.encode('''ありがとう。''' , add_special_tokens=False )
    text_a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=False )
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
    # 2 is for "[CLS]", 3 is for "[SEP]"
    assert encoded_sentence == [2] + text + [3]
    assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
    """BertJapaneseTokenizer tests using character-level subword tokenization.

    NOTE(review): in the obfuscated original every method shared one name
    (so only the last survived) and both class attributes were named ``__a``;
    names reconstructed from the method bodies and the standard tokenizer
    test-mixin contract — confirm against the mixin.
    """

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        """Write a toy character vocabulary into the temp dir."""
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def get_tokenizer( self , **kwargs ):
        """Build a character-level tokenizer from the toy vocab."""
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        """Sample input and its expected whitespace-joined tokenization."""
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
        return input_text, output_text

    def test_pretokenized_inputs( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input( self ):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input( self ):
        pass  # TODO add if relevant

    def test_full_tokenizer( self ):
        """End-to-end: characters tokenized and mapped to vocab ids."""
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
        tokens = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
        self.assertListEqual(
            tokens , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def test_character_tokenizer( self ):
        """CharacterTokenizer maps unknown characters to [UNK]."""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
        self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )

    def test_sequence_builders( self ):
        """[CLS]/[SEP] wrapping for single sequences and pairs."""
        tokenizer = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
        text = tokenizer.encode('''ありがとう。''' , add_special_tokens=False )
        text_a = tokenizer.encode('''どういたしまして。''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
    """AutoTokenizer must resolve the checkpoint to BertJapaneseTokenizer."""

    def test_tokenizer_bert_japanese( self ):
        # Fix: reconstructed bindings — the original assigned to a throwaway
        # name and asserted on undefined placeholders.
        EXAMPLE_BERT_JAPANESE_ID = '''cl-tohoku/bert-base-japanese'''
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class _UpperCamelCase ( unittest.TestCase ):
    """Loading a checkpoint with a mismatched tokenizer class must warn."""

    def test_tokenizer_mismatch_warning( self ):
        # Fix: reconstructed bindings (throwaway names / undefined
        # placeholders) and removed a stray dataset artifact that had been
        # fused onto the last line, breaking the syntax.
        EXAMPLE_BERT_JAPANESE_ID = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
            self.assertTrue(
                cm.records[0].message.startswith(
                    '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                    ''' is called from.''' ) )
        EXAMPLE_BERT_ID = '''bert-base-cased'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID )
            self.assertTrue(
                cm.records[0].message.startswith(
                    '''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
                    ''' is called from.''' ) )
from __future__ import annotations


def lowerCamelCase(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Break a Caesar cipher by minimising a chi-squared fitness statistic.

    Every possible shift of the alphabet is tried; each candidate plaintext is
    scored against expected English letter frequencies (or
    ``frequencies_dict`` if supplied) and the shift with the smallest
    statistic wins.

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: alphabet used by the cipher; defaults to a-z.
        frequencies_dict: expected relative letter frequencies keyed by letter.
        case_sensitive: when True, the case of each letter is preserved in
            the output; otherwise the ciphertext is lower-cased first.

    Returns:
        ``(best_shift, chi_squared_value, decrypted_text)``.

    Note: the original was obfuscated into uncompilable form (all four
    parameters shared one name and every local was assigned to the same
    identifier while being read under its real name); reconstructed from the
    surviving right-hand sides and use sites.
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # shift -> (chi squared statistic, decrypted text)
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet; ValueError means
                # the character is not a cipher letter and is kept verbatim.
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    occurrences = decrypted_with_shift.count(letter)
                    expected = frequencies[letter] * occurrences
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic (tuples compare on the statistic first)
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
from maths.prime_factors import prime_factors
def _prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` with multiplicity, by trial division."""
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        factors.append(n)
    return factors


def lowerCamelCase(a_) -> int:
    """Liouville lambda function: -1 if ``a_`` has an odd number of prime
    factors (counted with multiplicity), +1 otherwise.

    Raises:
        TypeError: if ``a_`` is not an integer.
        ValueError: if ``a_`` is not a positive integer.
    """
    if not isinstance(a_, int):
        # Fix: the original raised TypeError with the *input value* instead of
        # the message, and the message referenced an undefined name.
        msg = f"Input value of [number={a_}] must be an integer"
        raise TypeError(msg)
    if a_ < 1:
        raise ValueError("Input must be a positive integer")
    # Local trial-division helper replaces the project-only import so the
    # function is self-contained; same factor list (with multiplicity).
    return -1 if len(_prime_factors(a_)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config plus random inputs for the Flax tests.

    NOTE(review): reconstructed from the obfuscated original, whose
    parameters all shared one name (a SyntaxError) and whose attribute
    assignments were collapsed; names follow the values read by the methods
    below. Renamed from ``A_`` to the name the model-test class already
    references.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same as above but packaged as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model test-suite over every RoFormer head.

    NOTE(review): the obfuscated base class name is grounded by the
    ``FlaxModelTesterMixin`` import at the top of this file; the boolean
    attribute name is inferred from its value — confirm against the mixin.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # The mixin reads `self.model_tester` for configs and inputs.
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Every head must load from the small pretrained PyTorch checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration check of the pretrained Chinese RoFormer MLM head."""

    @slow
    def test_inference_masked_lm(self):
        # Fix: reconstructed bindings — the original collapsed every local to
        # one name and asserted on undefined placeholders.
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice taken from the PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Absolute tolerance used by the float comparisons in the tests below.
__SCREAMING_SNAKE_CASE :Dict = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
    """Builds tiny Autoformer configs and synthetic batches for the tests below.

    NOTE(review): obfuscation collapsed every local/attribute assignment to
    ``_UpperCAmelCase`` and every parameter to ``snake_case_`` (duplicated),
    so this class cannot run as written; code kept byte-identical, with
    conventional indentation restored, pending a faithful reconstruction.
    """

    def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Dict=1_6 , snake_case_ : Dict=1_3 , snake_case_ : int=7 , snake_case_ : Any=1_4 , snake_case_ : int=1_0 , snake_case_ : Any=1_9 , snake_case_ : int=5 , snake_case_ : Any=4 , snake_case_ : Tuple=True , snake_case_ : Optional[int]=1_6 , snake_case_ : List[str]=2 , snake_case_ : Any=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Tuple=[1, 2, 3, 4, 5] , snake_case_ : str=2_5 , snake_case_ : Any=5 , ):
        # Hyper-parameters for a deliberately tiny model (see right-hand sides
        # for the intended attribute names).
        _UpperCAmelCase = d_model
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = prediction_length
        _UpperCAmelCase = context_length
        _UpperCAmelCase = cardinality
        _UpperCAmelCase = num_time_features
        _UpperCAmelCase = lags_sequence
        _UpperCAmelCase = embedding_dimension
        _UpperCAmelCase = is_training
        _UpperCAmelCase = hidden_size
        _UpperCAmelCase = num_hidden_layers
        _UpperCAmelCase = num_attention_heads
        _UpperCAmelCase = intermediate_size
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = hidden_dropout_prob
        _UpperCAmelCase = attention_probs_dropout_prob
        _UpperCAmelCase = context_length
        # Autoformer's decoder sequence is prediction_length + label_length.
        _UpperCAmelCase = prediction_length + label_length
        _UpperCAmelCase = label_length
        _UpperCAmelCase = moving_average
        _UpperCAmelCase = autocorrelation_factor

    def lowercase ( self : Union[str, Any] ):
        # Builds the tiny AutoformerConfig from the attributes above.
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def lowercase ( self : int , snake_case_ : Optional[Any] ):
        # Builds random past/future time-series inputs for the given config.
        _UpperCAmelCase = config.context_length + max(config.lags_sequence )
        _UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        _UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] )
        _UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        _UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
        _UpperCAmelCase = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def lowercase ( self : List[Any] ):
        # (config, inputs_dict) pair for the common test-suite.
        _UpperCAmelCase = self.get_config()
        _UpperCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
        return config, inputs_dict

    def lowercase ( self : List[str] ):
        _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
        # Round-trips the encoder and decoder through save_pretrained /
        # from_pretrained and checks outputs stay within 1e-3.
        _UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
        _UpperCAmelCase = model(**snake_case_ )
        _UpperCAmelCase = outputs.encoder_last_hidden_state
        _UpperCAmelCase = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            _UpperCAmelCase = model.get_encoder()
            encoder.save_pretrained(snake_case_ )
            _UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ )
        _UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        _UpperCAmelCase = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        _UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        # Decoder input: seasonal part padded with zeros, trend part padded
        # with the context mean, each concatenated with the time features.
        _UpperCAmelCase = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        _UpperCAmelCase = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        _UpperCAmelCase = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        _UpperCAmelCase = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _UpperCAmelCase = model.get_decoder()
            decoder.save_pretrained(snake_case_ )
            _UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
        _UpperCAmelCase = decoder(
            trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common-test-suite wrapper for the Autoformer models.

    NOTE(review): obfuscation collapsed every class attribute to
    ``_lowerCamelCase`` and every method name to ``lowercase`` (so only the
    last definition of each survives at runtime), and locals are assigned to
    throwaway names; code kept byte-identical, with conventional indentation
    restored, pending a faithful reconstruction.
    """

    _lowerCamelCase : List[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    _lowerCamelCase : Tuple = (AutoformerForPrediction,) if is_torch_available() else ()
    _lowerCamelCase : List[Any] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    _lowerCamelCase : Optional[Any] = False
    _lowerCamelCase : Tuple = False
    _lowerCamelCase : int = False
    _lowerCamelCase : Optional[Any] = False
    _lowerCamelCase : Optional[Any] = False
    _lowerCamelCase : List[Any] = False

    def lowercase ( self : Tuple ):
        # setUp: build the tiny-model tester and the config tester.
        _UpperCAmelCase = AutoformerModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )

    def lowercase ( self : Optional[Any] ):
        self.config_tester.run_common_tests()

    def lowercase ( self : Union[str, Any] ):
        # Save/load round-trip: no keys may be reported missing.
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(snake_case_ )
                _UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
            self.assertEqual(info["missing_keys"] , [] )

    def lowercase ( self : Optional[int] ):
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )

    @unittest.skip(reason="Model has no tokens embeddings" )
    def lowercase ( self : Optional[int] ):
        pass

    def lowercase ( self : Optional[int] ):
        # main_input_name must match the first forward() argument after self.
        _UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) )
        # The main input is the name of the argument after `self`
        _UpperCAmelCase = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , snake_case_ )

    def lowercase ( self : List[str] ):
        # forward() must expose the documented argument names, in order.
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(snake_case_ )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask" )
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ] )
            self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )

    def lowercase ( self : Optional[int] ):
        # Attention-output shapes and output tuple length for every model.
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = True
        _UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ )
        _UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
        _UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
        _UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ )
        _UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
        _UpperCAmelCase = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            _UpperCAmelCase = False
            _UpperCAmelCase = True
            _UpperCAmelCase = model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _UpperCAmelCase = True
            _UpperCAmelCase = model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            _UpperCAmelCase = outputs.encoder_attentions
            self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            _UpperCAmelCase = len(snake_case_ )
            # Base expected number of outputs; optional entries add one each.
            _UpperCAmelCase = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(snake_case_ , snake_case_ )
            # decoder attentions
            _UpperCAmelCase = outputs.decoder_attentions
            self.assertIsInstance(snake_case_ , (list, tuple) )
            self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            _UpperCAmelCase = outputs.cross_attentions
            self.assertIsInstance(snake_case_ , (list, tuple) )
            self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            _UpperCAmelCase = True
            _UpperCAmelCase = True
            _UpperCAmelCase = model_class(snake_case_ )
            model.to(snake_case_ )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            self.assertEqual(out_len + 2 , len(snake_case_ ) )
            _UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def lowercase ( self : Dict ):
        super().test_retain_grad_hidden_states_attentions()
def UpperCAmelCase_ ( __lowercase: str = "train-batch.pt" ):
    """Download a cached Autoformer test batch from the Hub and deserialize it.

    Args:
        __lowercase: filename of the batch inside the
            ``hf-internal-testing/tourism-monthly-batch`` dataset repo.

    Returns:
        The deserialized batch loaded via ``torch.load`` — presumably a dict
        of tensors keyed like ``past_values`` (see the callers below).
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=__lowercase, repo_type="dataset")
    # Fix: the original passed the *filename* to torch.load (and reused it as
    # map_location) and returned an undefined name; load the downloaded file
    # onto the test device instead.
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class A_ ( unittest.TestCase ):
    """Slow integration tests against the pretrained tourism-monthly checkpoint.

    NOTE(review): all three methods share the obfuscated name ``lowercase``
    (only the last survives at runtime) and locals are assigned to throwaway
    names; code kept byte-identical, with conventional indentation restored.
    """

    def lowercase ( self : Optional[int] ):
        # Head-less forward pass: checks shape and a reference output slice.
        _UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
        _UpperCAmelCase = prepare_batch()
        with torch.no_grad():
            _UpperCAmelCase = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        _UpperCAmelCase = torch.Size(
            (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , snake_case_ )
        _UpperCAmelCase = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )

    def lowercase ( self : Optional[Any] ):
        # Encoder-only pass on the validation batch.
        _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
        _UpperCAmelCase = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            _UpperCAmelCase = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        _UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , snake_case_ )
        _UpperCAmelCase = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )

    def lowercase ( self : Tuple ):
        # Probabilistic generation: sample shape plus mean-prediction check.
        _UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
        _UpperCAmelCase = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            _UpperCAmelCase = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        _UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , snake_case_ )
        _UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ )
        _UpperCAmelCase = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style token encoder (embedding + learned positions + T5 blocks).

    Fixes over the previous version: the base classes were the undefined name
    ``__UpperCAmelCase`` (the file imports ModelMixin, ConfigMixin and
    ModuleUtilsMixin for exactly this), every ``__init__`` parameter shared one
    name (a SyntaxError), and the forward method was not named ``forward`` so
    ``nn.Module.__call__`` could never dispatch to it.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        # Positions are a fixed lookup table; do not train them.
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode ``encoder_input_tokens``; returns (hidden_states, encoder_inputs_mask)."""
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 361 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield primes indefinitely using an incremental (lazy) sieve.

    ``factor_map`` maps a composite number to one of its prime factors; when
    the running counter ``prime`` is not in the map it is prime, and its
    square is recorded as the first composite it will strike out.

    Fix: both functions in this module previously shared a single mangled
    name, and the pop/next calls referenced the undefined ``__lowerCAmelCase``;
    the module-level calls ``sieve()`` and ``solution()`` pin the real names.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: first composite it eliminates is its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Project Euler 123: least odd n with remainder 2*p_n*n above ``limit``.

    For odd n, ((p_n - 1)^n + (p_n + 1)^n) mod p_n^2 == 2 * p_n * n, so only
    odd indices need checking.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
| 313 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCAmelCase_(ChunkPipeline):
    """Zero-shot object detection pipeline: localize objects described by free-text
    candidate labels in an image.

    Fixes over the previous version: every method was named ``_UpperCamelCase``
    (ChunkPipeline dispatches to ``_sanitize_parameters`` / ``preprocess`` /
    ``_forward`` / ``postprocess``, so none were ever called), parameter names
    were duplicated (a SyntaxError), the decorator/base referenced the undefined
    ``_lowercase`` instead of the imported PIPELINE_INIT_ARGS / ChunkPipeline,
    and the sort key lambda referenced an undefined variable.
    """

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs: Any,
    ):
        """Detect objects. `image` may be a URL/path/PIL image, or a list of
        {"image": ..., "candidate_labels": ...} dicts; `text_queries` is the
        legacy alias for `candidate_labels`."""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''')
        if isinstance(image, (str, Image.Image)):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            # Already a batch of {"image", "candidate_labels"} dicts.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs: Any):
        # threshold / top_k only affect postprocessing.
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params

    def preprocess(self, inputs: Dict[str, Any]):
        """Yield one model input per candidate label (chunked inference)."""
        image = load_image(inputs['''image'''])
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(''',''')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs: Dict[str, Any]) -> Dict[str, Any]:
        # Keep bookkeeping keys out of the model call, then re-attach them.
        target_size = model_inputs.pop('''target_size''')
        candidate_label = model_inputs.pop('''candidate_label''')
        is_last = model_inputs.pop('''is_last''')
        outputs = self.model(**model_inputs)
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold: float = 0.1, top_k: int = None) -> List[Dict[str, Any]]:
        """Merge per-label detections, sort by score, optionally keep top_k."""
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['''target_size'''])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0])
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Convert an (xmin, ymin, xmax, ymax) tensor to an int dict."""
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
| 256 | """simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Both of these were previously assigned to the same name `UpperCAmelCase`
# (the dict clobbered the logger), while the functions below read `logger`
# and `MAPPING` — restore the names the rest of the file uses.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF SEW module path. A "*" is replaced
# by the encoder layer index when the weights are copied over.
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module reached by dotted path `key`.

    Restores the name the loader calls (`set_recursively`) and distinct
    parameter names; previously all five parameters were the single name
    `a__` (a SyntaxError).

    Args:
        hf_pointer: root HF module to traverse.
        key: dotted attribute path, e.g. "encoder.layers.3.attention.k_proj".
        value: tensor from the fairseq state dict.
        full_name: original fairseq parameter name (for messages).
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None.
    """
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every fairseq parameter into `hf_model` using MAPPING.

    Restores the name called at the bottom of the file
    (`recursively_load_weights`) and distinct parameter names. Conv feature
    extractor weights are routed through `load_conv_layer`; everything else
    goes through `set_recursively`. Unmatched weights are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # Fine-tuned CTC models nest the encoder under `sew`.
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Extract the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor parameter into the HF extractor.

    Restores the name called from the weight loader (`load_conv_layer`) and
    distinct parameter names. fairseq names look like
    "conv_layers.<layer_id>.<type_id>.{weight,bias}" where type_id 0 is the
    conv itself and type_id 2 a layer/group norm.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Norm layers exist on every layer with layer-norm, or only on layer 0 with group-norm.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Build a HF `SEWConfig` from a loaded fairseq SEW model.

    Restores the name called from `convert_sew_checkpoint` below and distinct
    parameter names; assignment targets were stripped by the renamer and are
    reconstructed from the upstream SEW conversion script — field-by-field
    mapping should be double-checked against it.
    """
    config = SEWConfig()
    if is_finetuned:
        # Fine-tuned CTC wrapper nests the acoustic model.
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    # NOTE: eval() of a config string stored in the checkpoint — trusted input only.
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = '''gelu'''
    config.feat_extract_norm = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = '''Wav2Vec2FeatureExtractor'''
    config.tokenizer_class = '''Wav2Vec2CTCTokenizer'''
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq SEW checkpoint into a transformers model folder.

    Restores the name the __main__ block calls (`convert_sew_checkpoint`) and
    distinct parameter names; variable targets are reconstructed from the
    upstream conversion script.

    Args:
        checkpoint_path: fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional existing HF config.json to use instead of converting.
        dict_path: fairseq Dictionary path (fine-tuned CTC models only).
        is_finetuned: whether the checkpoint is a fine-tuned CTC model.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    # Layer-norm feature extractors need the attention mask at inference time.
    return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='''|''',
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # `parser` and `args` were previously both assigned to `UpperCAmelCase`
    # while being read under their real names below — restore the names.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 256 | 1 |
class Graph:
    """Undirected weighted graph with Borůvka's minimum-spanning-tree algorithm.

    Restored structure: the code read `Graph()`, `Graph.UnionFind()` and
    `Graph.build(...)` but the class was named `SCREAMING_SNAKE_CASE__`, the
    UnionFind helper had been flattened out of the class, and tuple
    unpackings were collapsed to `A , A , A : int = edge` (a SyntaxError).
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[head][tail] == weight (stored symmetrically).
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add `vertex` if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so all edge weights become distinct
        (Borůvka requires distinct weights for a unique MST)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("""\n""")

    def get_edges(self):
        """Return all (tail, head, weight) triples (each edge appears twice)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from vertex and (head, tail, weight) edge lists."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                # Path compression.
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets of the two items; return the new root."""
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` as a new Graph.

        Each round attaches the cheapest outgoing edge of every component;
        the component count at least halves per round.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reversed duplicate of every edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 115 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both values were previously bound to the same name `SCREAMING_SNAKE_CASE_`
# (the URL map clobbered the logger), while the config classes below read
# `logger` — restore distinct, conventional names.
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text model (XLM-R style encoder).

    Restored name: the class was `SCREAMING_SNAKE_CASE__` inheriting from its
    own (undefined) name, while the composite config below instantiates
    `AltCLIPTextConfig(**text_config)`; parameter names were stripped and are
    reconstructed from the `self.*` assignment targets.
    """

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=25_0002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision model (CLIP-style ViT).

    Restored name: instantiated as `AltCLIPVisionConfig(**vision_config)` by
    the composite config below; the classmethod is the standard
    `from_pretrained` override that unwraps a nested vision config.
    """

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("""model_type""") == "altclip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Composite AltCLIP configuration holding a text and a vision sub-config.

    Fixes: the base was the undefined self-reference (PretrainedConfig is
    imported for this), and `from_text_vision_configs` / `to_dict` previously
    shared one mangled name so the first was shadowed.
    """

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("""text_config_dict""", None)
        vision_config_dict = kwargs.pop("""vision_config_dict""", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["""id2label"""] = {
                    str(key): value for key, value in _vision_config_dict["""id2label"""].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""")
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from two sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding both sub-configs and recording `model_type`."""
        output = copy.deepcopy(self.__dict__)
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 115 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Read as `logger` inside the config class below; was bound to `lowercase_`.
logger = logging.get_logger(__name__)
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Tuple = "upernet"
def __init__( self : str, _lowerCamelCase : List[Any]=None, _lowerCamelCase : Optional[int]=5_12, _lowerCamelCase : int=0.02, _lowerCamelCase : str=[1, 2, 3, 6], _lowerCamelCase : str=True, _lowerCamelCase : Tuple=0.4, _lowerCamelCase : Optional[int]=3_84, _lowerCamelCase : List[str]=2_56, _lowerCamelCase : Tuple=1, _lowerCamelCase : int=False, _lowerCamelCase : Dict=2_55, **_lowerCamelCase : int, ):
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__A = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
__A = backbone_config.get('''model_type''' )
__A = CONFIG_MAPPING[backbone_model_type]
__A = config_class.from_dict(_lowerCamelCase )
__A = backbone_config
__A = hidden_size
__A = initializer_range
__A = pool_scales
__A = use_auxiliary_head
__A = auxiliary_loss_weight
__A = auxiliary_in_channels
__A = auxiliary_channels
__A = auxiliary_num_convs
__A = auxiliary_concat_input
__A = loss_ignore_index
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = copy.deepcopy(self.__dict__ )
__A = self.backbone_config.to_dict()
__A = self.__class__.model_type
return output
| 266 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowercase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowercase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowercase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """``datasets.Metric`` wrapper around the official MAUVE implementation."""

    def _SCREAMING_SNAKE_CASE ( self : Any ):
        """Declare metric metadata: string features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/krishnap25/mauve''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence''' ),
                    '''references''': datasets.Value('''string''', id='''sequence''' ),
                } ), codebase_urls=['''https://github.com/krishnap25/mauve'''], reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ], )

    def _SCREAMING_SNAKE_CASE ( self : int, _lowerCamelCase : str, _lowerCamelCase : Optional[Any], _lowerCamelCase : Any=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : str="auto", _lowerCamelCase : Union[str, Any]=-1, _lowerCamelCase : List[str]=0.9, _lowerCamelCase : int=5, _lowerCamelCase : Tuple=5_00, _lowerCamelCase : Union[str, Any]="gpt2-large", _lowerCamelCase : int=-1, _lowerCamelCase : Union[str, Any]=10_24, _lowerCamelCase : Union[str, Any]=25, _lowerCamelCase : str=5, _lowerCamelCase : Any=True, _lowerCamelCase : Union[str, Any]=25, ):
        """Delegate scoring to ``mauve.compute_mauve``.

        NOTE(review): every parameter was mangled to ``_lowerCamelCase``;
        upstream these are predictions/references plus the optional knobs
        documented in ``_KWARGS_DESCRIPTION``.  ``out`` is also never bound
        (the mangled ``__A = ...`` swallows the result) -- confirm upstream.
        """
        __A = compute_mauve(
            p_text=_lowerCamelCase, q_text=_lowerCamelCase, p_features=_lowerCamelCase, q_features=_lowerCamelCase, p_tokens=_lowerCamelCase, q_tokens=_lowerCamelCase, num_buckets=_lowerCamelCase, pca_max_data=_lowerCamelCase, kmeans_explained_var=_lowerCamelCase, kmeans_num_redo=_lowerCamelCase, kmeans_max_iter=_lowerCamelCase, featurize_model_name=_lowerCamelCase, device_id=_lowerCamelCase, max_text_length=_lowerCamelCase, divergence_curve_discretization_size=_lowerCamelCase, mauve_scaling_factor=_lowerCamelCase, verbose=_lowerCamelCase, seed=_lowerCamelCase, )
        return out
| 266 | 1 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE(matrix):
    """Return the minimal path cost from the top-left to the bottom-right
    cell of *matrix*, moving only right or down.

    Fixes the mangled parameter name: the body reads ``matrix`` but the
    signature bound ``SCREAMING_SNAKE_CASE``, so every call raised NameError.

    :param matrix: rectangular list of lists of numbers; NOTE it is mutated
        in place (reused as the DP cost table).
    :return: the minimal path cost (value left in the bottom-right cell).
    """
    # preprocessing the first row: only one way to reach each cell (from left)
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column: only reachable from above
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # interior cells: cheapest of coming from above or from the left
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make CUDA/cuDNN ops deterministic so byte-slice comparisons are reproducible.
enable_full_determinism()


class _lowerCamelCase ( unittest.TestCase ):
    """Fast CPU tests for ``AudioDiffusionPipeline`` (unconditional, VQVAE and
    encoding-conditioned variants), comparing generated image/audio slices to
    hard-coded reference bytes.

    NOTE(review): identifiers are machine-mangled: every helper is named
    ``_snake_case`` (later defs shadow earlier ones), the bodies read
    ``self.dummy_unet`` / ``self.dummy_vqvae_and_unet`` /
    ``self.dummy_unet_condition`` which the visible names never define, and
    several call sites pass the undefined bare name ``_SCREAMING_SNAKE_CASE``
    (upstream: ``None`` / device / module args).  Confirm against the
    upstream diffusers test file.
    """

    def _snake_case ( self )->Tuple:
        """Release references and CUDA memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _snake_case ( self )->Optional[int]:
        """Tiny unconditional UNet with a 32x64 (mel-spectrogram) sample size."""
        torch.manual_seed(0 )
        A_ : List[Any] = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return model

    @property
    def _snake_case ( self )->List[Any]:
        """Tiny cross-attention-conditioned UNet (encoding dim 10)."""
        torch.manual_seed(0 )
        A_ : Dict = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
        return model

    @property
    def _snake_case ( self )->int:
        """(vqvae, unet) pair for the latent-space pipeline variant."""
        torch.manual_seed(0 )
        A_ : Optional[int] = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        A_ : Tuple = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
        return vqvae, unet

    @slow
    def _snake_case ( self )->str:
        """End-to-end smoke test for three pipeline variants (DDPM, DDIM+VQVAE,
        encoding-conditioned), each checked against fixed output byte slices."""
        A_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        A_ : Any = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        A_ : Union[str, Any] = DDPMScheduler()
        A_ : Any = AudioDiffusionPipeline(vqvae=_SCREAMING_SNAKE_CASE , unet=self.dummy_unet , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        A_ : Union[str, Any] = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        A_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
        A_ : str = pipe(generator=_SCREAMING_SNAKE_CASE , steps=4 )
        A_ : Optional[int] = output.audios[0]
        A_ : Union[str, Any] = output.images[0]
        # Re-run with return_dict=False: tuple output must match the dict path.
        A_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
        A_ : Dict = pipe(generator=_SCREAMING_SNAKE_CASE , steps=4 , return_dict=_SCREAMING_SNAKE_CASE )
        A_ : Tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        A_ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        A_ : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
        A_ : Optional[Any] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        # Latent-space variant: DDIM scheduler with the VQVAE pair, resuming
        # from raw audio at an intermediate step.
        A_ : Any = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        A_ : Optional[Any] = DDIMScheduler()
        A_ : Optional[int] = self.dummy_vqvae_and_unet
        A_ : List[str] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        A_ : Tuple = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        np.random.seed(0 )
        A_ : Dict = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        A_ : int = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
        A_ : List[str] = pipe(raw_audio=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , start_step=5 , steps=10 )
        A_ : Optional[Any] = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        A_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        A_ : Optional[int] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        # Encoding-conditioned variant.
        A_ : str = self.dummy_unet_condition
        A_ : List[str] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=_SCREAMING_SNAKE_CASE , mel=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        A_ : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        np.random.seed(0 )
        A_ : str = torch.rand((1, 1, 10) )
        A_ : Optional[Any] = pipe(generator=_SCREAMING_SNAKE_CASE , encoding=_SCREAMING_SNAKE_CASE )
        A_ : int = output.images[0]
        A_ : Optional[int] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        A_ : str = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Slow GPU integration test against the published
    ``teticio/audio-diffusion-ddim-256`` pipeline checkpoint.

    NOTE(review): ``_SCREAMING_SNAKE_CASE`` is read as an undefined bare name
    (upstream: the device / ``None``) -- identifiers were machine-mangled.
    """

    def _snake_case ( self )->Dict:
        """Release references and CUDA memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self )->Any:
        """Generate one sample from the hub pipeline and compare shapes plus a
        10-byte image slice against hard-coded reference values."""
        A_ : Optional[int] = torch_device
        A_ : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
        A_ : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        A_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(42 )
        A_ : Union[str, Any] = pipe(generator=_SCREAMING_SNAKE_CASE )
        A_ : str = output.audios[0]
        A_ : int = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        A_ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        A_ : Tuple = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """Treap node: a BST ordered by ``value`` with a random heap ``prior``.

    Fixes the mangled original: the class must be named ``Node`` (``insert``
    below constructs ``Node(value)``) and the ``lowercase_ = ...`` assignments
    never bound the instance attributes the other functions read.
    """

    def __init__(self, value=None):
        self.value = value
        # Random priority in [0, 1) keeps the treap balanced in expectation.
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self):
        """Nested, pprint-formatted representation for debugging."""
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat(
            {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        """Space-separated pre-order dump of the subtree's values."""
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    """Split *root* into two treaps: (values <= value, values > value).

    Fixes the mangled original: parameters were both named ``__lowerCAmelCase``
    (a SyntaxError) and the tuple-unpacking assignments that re-link child
    pointers were collapsed to ``lowercase_ = ...``.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root and its right subtree belong to the right part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root and its left subtree belong to the left part.
            root.right, right = split(root.right, value)
            return root, right
def merge(left, right):
    """Merge two treaps where every value in *left* <= every value in *right*.

    Fixes the mangled original: duplicate ``__lowerCAmelCase`` parameters and
    child-pointer assignments collapsed to ``lowercase_ = ...``.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # ``left`` wins the heap comparison and stays the root.
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root, value):
    """Insert *value* into the treap rooted at *root*; return the new root.

    Fixes the mangled original: the two-target unpack of ``split`` and the
    ``node`` binding were collapsed to ``lowercase_ = ...``, and the name must
    be ``insert`` (``interact_treap`` calls it).
    """
    node = Node(value)
    # Partition around the new value, then stitch the pieces back together.
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove every node holding *value* from the treap; return the new root.

    Fixes the mangled original: the unpacking of the two ``split`` results was
    collapsed to ``lowercase_ = ...``, and the name must be ``erase``
    (``interact_treap`` calls it).
    """
    left, right = split(root, value - 1)   # left: values < value
    _, right = split(right, value)         # drop the nodes equal to value
    return merge(left, right)
def inorder(root):
    """Print the treap's values in sorted (in-order) order, comma separated.

    Fixes the mangled original: the parameter was named ``__lowerCAmelCase``
    while the body read ``root``, and the function must be named ``inorder``
    to recurse on itself.
    """
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root, args):
    """Apply a whitespace-separated command string to the treap *root*.

    "+n" inserts value n; "-n" erases all nodes with value n; anything else
    prints an error.  Returns the updated root.

    Fixes the mangled original: ``root`` was never rebound (assignments went
    to ``lowercase_``) yet was returned, and the parameters were unnamed.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    """Interactive REPL: read command lines from stdin, print the treap after
    each, until the user enters 'q'.

    Fixes the mangled original: ``root`` and ``args`` were never bound (all
    assignments went to ``lowercase_``) and the function must be named
    ``main`` for the module's ``__main__`` guard.
    """
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 136 | '''simple docstring'''
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """Recursively count ways to write *needed_sum* as a sum of distinct
    natural numbers each raised to *power*.

    Fixes the mangled original: all five parameters were named
    ``UpperCAmelCase`` (a SyntaxError) while the body read the names restored
    here, the recursive calls referenced ``backtrack`` which no definition
    provided, and the tuple unpacking of the recursive results was collapsed.

    :return: (current_sum, solutions_count) after exploring this branch.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # Include current_number**power and recurse with the next base.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # Exclude current_number**power and try the next base.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum, power):
    """Return the number of ways *needed_sum* can be expressed as a sum of
    distinct natural numbers raised to *power*.

    :raises ValueError: if needed_sum is outside [1, 1000] or power outside [2, 10].
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 198 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
UpperCamelCase__ = logging.get_logger(__name__)

# Asset file names, hub download locations, max input sizes and init config
# for the MobileBERT tokenizer.  NOTE(review): all four constants were mangled
# to the same name ``UpperCamelCase__`` (each shadows the previous); upstream
# they are VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION.
UpperCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ = {
    '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
    '''tokenizer_file''': {
        '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
    },
}
UpperCamelCase__ = {'''mobilebert-uncased''': 5_1_2}
UpperCamelCase__ = {}


class lowerCamelCase_ ( __a ):
    """Fast (tokenizers-backed) MobileBERT tokenizer.

    NOTE(review): identifiers are machine-mangled.  The class attributes all
    share the name ``lowerCAmelCase__`` (later ones shadow earlier ones) and
    reference the upstream constant names the mangled module never defines;
    method parameters are collapsed to ``_A`` while bodies read
    ``token_ids_a`` etc.  Confirm against the upstream transformers file.
    """
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = MobileBertTokenizer

    def __init__( self : List[Any] , _A : int=None , _A : List[Any]=None , _A : Optional[Any]=True , _A : List[Any]="[UNK]" , _A : str="[SEP]" , _A : List[Any]="[PAD]" , _A : Any="[CLS]" , _A : Any="[MASK]" , _A : Optional[Any]=True , _A : Union[str, Any]=None , **_A : List[str] , ):
        """Create the fast tokenizer, then re-sync the backend normalizer when
        the lowercase / strip-accents / Chinese-chars options differ from the
        serialized tokenizer state."""
        super().__init__(
            _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
        UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , _A ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer class with the requested options.
            UpperCAmelCase__ : List[str] = getattr(_A , normalizer_state.pop('''type''' ) )
            UpperCAmelCase__ : Optional[Any] = do_lower_case
            UpperCAmelCase__ : Union[str, Any] = strip_accents
            UpperCAmelCase__ : Dict = tokenize_chinese_chars
            UpperCAmelCase__ : Tuple = normalizer_class(**_A )
        UpperCAmelCase__ : str = do_lower_case

    def lowercase_ ( self : Tuple , _A : Optional[Any] , _A : Union[str, Any]=None ):
        """Build model inputs: ``[CLS] A [SEP]`` (plus ``B [SEP]`` for pairs).

        NOTE(review): both sequences were mangled to ``token_ids_a``; upstream
        the second branch appends the *second* sequence (token_ids_1).
        """
        UpperCAmelCase__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
        """Token-type ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]`` (pairs)."""
        UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowercase_ ( self : Optional[Any] , _A : str , _A : Optional[str] = None ):
        """Save the backend tokenizer's vocabulary files into a directory and
        return the written file paths."""
        UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )
| 299 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase__ = logging.get_logger(__name__)

# Make CUDA/cuDNN ops deterministic so tensor-slice comparisons are reproducible.
enable_full_determinism()


class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    """Common-model-test suite for a small 3-channel ``UNet2DModel``.

    NOTE(review): identifiers are machine-mangled: the two ``lowerCAmelCase__``
    class attributes shadow each other (upstream: ``model_class`` and
    ``main_input_name``), method names are all ``lowercase_``, and ``_A`` is
    read as an undefined bare name (upstream: the device).
    """
    lowerCAmelCase__ = UNetaDModel
    lowerCAmelCase__ = 'sample'

    @property
    def lowercase_ ( self : Union[str, Any] ):
        """Random (4, 3, 32, 32) sample plus a timestep tensor."""
        UpperCAmelCase__ : Optional[Any] = 4
        UpperCAmelCase__ : str = 3
        UpperCAmelCase__ : str = (32, 32)
        UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self : int ):
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def lowercase_ ( self : Dict ):
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)

    def lowercase_ ( self : Union[str, Any] ):
        """Return (init_kwargs, forward_inputs) for the shared model tests."""
        UpperCAmelCase__ : Tuple = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        UpperCAmelCase__ : Tuple = self.dummy_input
        return init_dict, inputs_dict
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    """Common-model-test suite for the 4-channel (latent) ``UNet2DModel`` plus
    hub-loading checks against ``fusing/unet-ldm-dummy-update``.

    NOTE(review): identifiers are machine-mangled: method names are all
    ``lowercase_``, local targets collapsed to ``UpperCAmelCase__`` while the
    bodies read the upstream names (``model``, ``loading_info``, ``noise``,
    ...), and ``_A`` appears as an undefined bare name (upstream: the device
    or ``True``).  Confirm against the upstream diffusers test file.
    """
    lowerCAmelCase__ = UNetaDModel
    lowerCAmelCase__ = 'sample'

    @property
    def lowercase_ ( self : Any ):
        """Random (4, 4, 32, 32) latent sample plus a timestep tensor."""
        UpperCAmelCase__ : List[str] = 4
        UpperCAmelCase__ : Dict = 4
        UpperCAmelCase__ : List[str] = (32, 32)
        UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self : Tuple ):
        """Expected input shape (C, H, W)."""
        return (4, 32, 32)

    @property
    def lowercase_ ( self : List[str] ):
        """Expected output shape (C, H, W)."""
        return (4, 32, 32)

    def lowercase_ ( self : Dict ):
        """Return (init_kwargs, forward_inputs) for the shared model tests."""
        UpperCAmelCase__ : List[str] = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        UpperCAmelCase__ : Optional[Any] = self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self : Any ):
        """from_pretrained loads with no missing keys and a forward pass runs."""
        UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def lowercase_ ( self : Optional[Any] ):
        """Same forward-pass smoke test, on GPU."""
        UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def lowercase_ ( self : Optional[Any] ):
        """Accelerate (low_cpu_mem_usage) loading must match a normal load."""
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()
        UpperCAmelCase__ : Tuple = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
        UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )
        UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']
        assert torch_all_close(_A , _A , rtol=1e-3 )

    def lowercase_ ( self : Optional[Any] ):
        """Deterministic forward pass matches a hard-coded output slice."""
        UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )
        UpperCAmelCase__ : Union[str, Any] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCAmelCase__ : str = noise.to(_A )
        UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
        UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    """Common-model-test suite for the NCSNPP-style ``UNet2DModel`` (Fourier
    time embedding, skip up/down blocks) plus slow checks against public
    score-SDE checkpoints.

    NOTE(review): identifiers are machine-mangled: method names are all
    ``lowercase_``, locals collapsed to ``UpperCAmelCase__`` while bodies
    read the upstream names, ``_A`` is an undefined bare name (upstream: the
    device), and ``torch.intaa`` is the mangled ``torch.int64``.
    """
    lowerCAmelCase__ = UNetaDModel
    lowerCAmelCase__ = 'sample'

    @property
    def lowercase_ ( self : Any , _A : str=(32, 32) ):
        """Random (4, 3, H, W) sample plus an int timestep tensor."""
        UpperCAmelCase__ : Tuple = 4
        UpperCAmelCase__ : List[str] = 3
        UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self : List[str] ):
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def lowercase_ ( self : List[Any] ):
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)

    def lowercase_ ( self : Optional[int] ):
        """Return (init_kwargs, forward_inputs): an NCSNPP-flavored config with
        Fourier time embedding and skip blocks."""
        UpperCAmelCase__ : List[str] = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1e-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        UpperCAmelCase__ : Tuple = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def lowercase_ ( self : Optional[int] ):
        """Hub checkpoint loads with no missing keys and runs a forward pass."""
        UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(_A )
        UpperCAmelCase__ : List[str] = self.dummy_input
        UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
        UpperCAmelCase__ : Optional[Any] = noise
        UpperCAmelCase__ : Any = model(**_A )
        assert image is not None, "Make sure output is not None"

    @slow
    def lowercase_ ( self : Dict ):
        """Deterministic 256x256 forward pass matches a hard-coded slice."""
        UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )
        UpperCAmelCase__ : Optional[Any] = 4
        UpperCAmelCase__ : List[str] = 3
        UpperCAmelCase__ : Dict = (256, 256)
        UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample
        UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )

    def lowercase_ ( self : Optional[int] ):
        """Deterministic 32x32 forward pass (dummy FFHQ checkpoint) matches a
        hard-coded slice."""
        UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )
        UpperCAmelCase__ : str = 4
        UpperCAmelCase__ : Any = 3
        UpperCAmelCase__ : int = (32, 32)
        UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )
        with torch.no_grad():
            UpperCAmelCase__ : int = model(_A , _A ).sample
        UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
        # fmt: on
        self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )

    def lowercase_ ( self : Tuple ):
        """Deliberate no-op override.

        NOTE(review): upstream this skips an inherited common test that does
        not apply to this configuration -- the intent is not visible here.
        """
        pass
| 299 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
    """Tests for ``FlaxAutoModel``: hub resolution for BERT/RoBERTa
    checkpoints, jit-compatibility of the loaded models, and the error
    messages raised for bad identifiers, revisions and weight formats.

    NOTE(review): identifiers are machine-mangled -- ``__a`` is read as a
    bare undefined name in several calls (upstream: model_name / the loaded
    config/model / expected exception classes) and locals are collapsed to
    ``_a``.  Confirm against the upstream transformers test file.
    """

    @slow
    def UpperCamelCase__ ( self : Optional[int] ):
        """AutoConfig + FlaxAutoModel resolve BERT checkpoints."""
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(__a ):
                _a = AutoConfig.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a , __a )
                _a = FlaxAutoModel.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a , __a )

    @slow
    def UpperCamelCase__ ( self : int ):
        """AutoConfig + FlaxAutoModel resolve RoBERTa checkpoints."""
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(__a ):
                _a = AutoConfig.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a , __a )
                _a = FlaxAutoModel.from_pretrained(__a )
                self.assertIsNotNone(__a )
                self.assertIsInstance(__a , __a )

    @slow
    def UpperCamelCase__ ( self : Optional[int] ):
        """Loaded BERT models run inside a jax.jit-compiled function."""
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            _a = AutoTokenizer.from_pretrained(__a )
            _a = FlaxBertModel.from_pretrained(__a )
            _a = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )

            # Local helper shadows the builtin ``eval`` -- upstream quirk.
            @jax.jit
            def eval(**__a : Optional[Any] ):
                return model(**__a )

            eval(**__a ).block_until_ready()

    @slow
    def UpperCamelCase__ ( self : Dict ):
        """Loaded RoBERTa models run inside a jax.jit-compiled function."""
        for model_name in ["roberta-base", "roberta-large"]:
            _a = AutoTokenizer.from_pretrained(__a )
            _a = FlaxRobertaModel.from_pretrained(__a )
            _a = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )

            # Local helper shadows the builtin ``eval`` -- upstream quirk.
            @jax.jit
            def eval(**__a : str ):
                return model(**__a )

            eval(**__a ).block_until_ready()

    def UpperCamelCase__ ( self : Any ):
        """Non-existent model id raises with an explanatory message."""
        with self.assertRaisesRegex(
            __a , "bert-base is not a local folder and is not a valid model identifier" ):
            _a = FlaxAutoModel.from_pretrained("bert-base" )

    def UpperCamelCase__ ( self : int ):
        """Non-existent revision raises with an explanatory message."""
        with self.assertRaisesRegex(
            __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _a = FlaxAutoModel.from_pretrained(__a , revision="aaaaaa" )

    def UpperCamelCase__ ( self : Dict ):
        """Repo without flax weights raises a missing-msgpack error."""
        with self.assertRaisesRegex(
            __a , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
            _a = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )

    def UpperCamelCase__ ( self : str ):
        """PyTorch-only repo tells the user to pass from_pt=True."""
        with self.assertRaisesRegex(__a , "Use `from_pt=True` to load this model" ):
            _a = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 63 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of *vector* (``vector . vector``).

    Fixes the mangled original name ``lowercase__``: the SVC class below calls
    this helper as ``norm_squared`` inside its RBF kernel.
    """
    return np.dot(vector, vector)
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , *,
_lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ):
__UpperCAmelCase = regularization
__UpperCAmelCase = gamma
if kernel == "linear":
__UpperCAmelCase = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
__UpperCAmelCase = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
__UpperCAmelCase = F'''Unknown kernel: {kernel}'''
raise ValueError(_lowercase )
def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ):
return np.dot(_lowercase , _lowercase )
def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ):
__UpperCAmelCase = observations
__UpperCAmelCase = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__UpperCAmelCase) , ) = np.shape(_lowercase )
def to_minimize(_lowercase : ndarray ) -> float:
__UpperCAmelCase = 0
((__UpperCAmelCase) , ) = np.shape(_lowercase )
for i in range(_lowercase ):
for j in range(_lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowercase )
__UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 )
__UpperCAmelCase = Bounds(0 , self.regularization )
__UpperCAmelCase = minimize(
_lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x
__UpperCAmelCase = l_star
# calculating mean offset of separation plane to points
__UpperCAmelCase = 0
for i in range(_lowercase ):
for j in range(_lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
__UpperCAmelCase = s / n
def a ( self : List[Any] , _lowercase : ndarray ):
__UpperCAmelCase = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 332 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = [0] * len(lowercase_ )
__lowerCAmelCase = []
__lowerCAmelCase = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
__lowerCAmelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__lowerCAmelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
# Adjacency list of Graph
_a = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
if __name__ == "__main__":
    # The function above is named ``_lowerCAmelCase``; the original called it
    # under its pre-rename name ``longest_distance`` on an undefined variable
    # ``graph``, which raised NameError on import. Guarded so importing the
    # module no longer runs the computation as a side effect.
    _lowerCAmelCase(_a)
| 362 |
'''simple docstring'''
import random
def _lowerCAmelCase ( lowercase ) -> bool:
__lowerCAmelCase = num - 1
__lowerCAmelCase = 0
while s % 2 == 0:
__lowerCAmelCase = s // 2
t += 1
for _ in range(5 ):
__lowerCAmelCase = random.randrange(2 , num - 1 )
__lowerCAmelCase = pow(lowercase , lowercase , lowercase )
if v != 1:
__lowerCAmelCase = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__lowerCAmelCase = i + 1
__lowerCAmelCase = (v**2) % num
return True
def _lowerCAmelCase ( lowercase ) -> bool:
if num < 2:
return False
__lowerCAmelCase = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowercase )
def _lowerCAmelCase ( lowercase = 1024 ) -> int:
while True:
__lowerCAmelCase = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowercase ):
return num
if __name__ == "__main__":
    # At this point ``_lowerCAmelCase`` resolves to the prime generator
    # defined immediately above. The original referenced the stale names
    # ``generate_large_prime`` / ``is_prime_low_num`` / ``num`` (NameError)
    # and ran unconditionally on import; both problems are fixed here. The
    # redundant re-check print was dropped because the primality helper is no
    # longer reachable under a distinct module-level name.
    num = _lowerCAmelCase()
    print(("Prime number:", num))
| 46 | 0 |
'''simple docstring'''
def snake_case_(min_val=10, max_val=1000, option=True) -> int:
    """Return ``min_val`` when ``option`` is truthy, otherwise ``max_val``.

    Fix: the original gave all three parameters the same name
    (``lowerCAmelCase_``), which is a SyntaxError; the names are restored
    from the messages and the type checks they imply.

    Raises:
        AssertionError: when the arguments are not (int, int, bool).
        ValueError: when ``min_val`` exceeds ``max_val``.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def snake_case_(number_a, number_b) -> int:
    """Return the integer midpoint of two numbers (truncated toward zero).

    Fix: the original declared both parameters as ``lowerCAmelCase_`` (a
    SyntaxError) and the body summed ``number_a`` with itself; the second
    operand is restored to ``number_b``.
    """
    return int((number_a + number_b) / 2)
def snake_case_(lower, higher, to_guess) -> None:
    """Binary-search ``to_guess`` in (``lower``, ``higher``), printing each probe.

    Fixes from the mangled original: the three parameters shared one name
    (a SyntaxError) and the midpoint helper was called under a name
    (``get_avg``) that no longer exists — the midpoint is computed inline.

    Raises:
        AssertionError: when any argument is not an int.
        ValueError: on an inverted range or a target outside the open range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number) -> str:
        # Compare one probe against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Midpoint probe; convergence is guaranteed on integers.
        number = int((last_lowest + last_highest) / 2)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def snake_case_() -> None:
    """CLI entry point: prompt for the bounds and the target, then run the search.

    NOTE(review): each input is bound to the same local ``_UpperCAmelCase``
    (every assignment overwrites the previous one), and the final call reads
    ``guess_the_number`` / ``lowerCAmelCase_`` — names that do not exist in
    this module after the mechanical rename — so this function raises
    NameError when executed. The three distinct inputs and the call to the
    search function above need to be re-wired.
    """
    _UpperCAmelCase: List[Any] = int(input("""Enter lower value : """).strip())
    _UpperCAmelCase: Optional[int] = int(input("""Enter high value : """).strip())
    _UpperCAmelCase: Optional[int] = int(input("""Enter value to guess : """).strip())
    guess_the_number(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
if __name__ == "__main__":
    # ``main`` no longer exists under that name; after the mechanical rename
    # the CLI entry point directly above is the last surviving definition of
    # ``snake_case_`` at module level, so call it by that name.
    snake_case_()
| 215 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCAmelCase__ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__(lowercase):
    """Lightning module fine-tuning a transformer for token classification (NER/POS).

    NOTE(review): this is the upstream ``NERTransformer`` example after a
    mechanical rename. Every method below is published under the single name
    ``lowercase__`` (only the last definition survives on the class), several
    signatures repeat one parameter name (a SyntaxError), and most locals were
    collapsed onto ``lowerCAmelCase`` while later lines still read the
    original variable names (``hparams``, ``batch``, ``outputs``, ...).
    Code is kept byte-for-byte; comments flag each break.
    """

    # Task identifier handed to BaseTransformer; upstream names this ``mode``
    # and ``__init__`` still reads it via ``self.mode``.
    a: Tuple = "token-classification"

    def __init__(self, snake_case__):
        """Resolve the TokenClassificationTask subclass named by the hparams and init the base.

        NOTE(review): ``hparams``, ``module`` and
        ``token_classification_task_clazz`` are read but never bound here
        (the locals were renamed to ``lowerCAmelCase``) — NameError at runtime.
        """
        if type(snake_case__) == dict:
            lowerCAmelCase: List[str] = Namespace(**snake_case__)
        lowerCAmelCase: Optional[Any] = import_module("tasks")
        try:
            lowerCAmelCase: Dict = getattr(snake_case__, hparams.task_type)
            lowerCAmelCase: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}"""
            )
        lowerCAmelCase: Dict = self.token_classification_task.get_labels(hparams.labels)
        lowerCAmelCase: str = CrossEntropyLoss().ignore_index
        super().__init__(snake_case__, len(self.labels), self.mode)

    def lowercase__(self, **snake_case__):
        """Forward pass: delegate straight to the wrapped transformer model."""
        return self.model(**snake_case__)

    # NOTE(review): the two positional parameters share one name (SyntaxError);
    # upstream this is ``training_step(self, batch, batch_num)``.
    def lowercase__(self, snake_case__, snake_case__):
        """Run one training step and return the loss dict.

        NOTE(review): ``batch``, ``outputs`` and ``loss`` are read but never
        bound (locals collapsed to ``lowerCAmelCase``).
        """
        lowerCAmelCase: List[str] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            lowerCAmelCase: Optional[int] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        lowerCAmelCase: str = self(**snake_case__)
        lowerCAmelCase: Tuple = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def lowercase__(self):
        """Tokenize and cache features for the train/dev/test splits on disk.

        NOTE(review): ``snake_case__`` and ``args`` are read but never bound
        in this scope (upstream uses ``mode``-derived cache paths and the
        stored hparams).
        """
        lowerCAmelCase: List[str] = self.hparams
        for mode in ["train", "dev", "test"]:
            lowerCAmelCase: Optional[int] = self._feature_file(snake_case__)
            if os.path.exists(snake_case__) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", snake_case__)
                lowerCAmelCase: Dict = torch.load(snake_case__)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                lowerCAmelCase: Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir, snake_case__)
                lowerCAmelCase: Optional[int] = self.token_classification_task.convert_examples_to_features(
                    snake_case__, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=snake_case__, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", snake_case__)
                torch.save(snake_case__, snake_case__)

    # NOTE(review): three parameters share one name (SyntaxError); upstream
    # this is ``get_dataloader(self, mode, batch_size, shuffle=False)``.
    def lowercase__(self, snake_case__, snake_case__, snake_case__=False):
        """Load cached features and wrap them into a TensorDataset/DataLoader.

        NOTE(review): ``features`` is read but never bound (locals collapsed).
        """
        lowerCAmelCase: Dict = self._feature_file(snake_case__)
        logger.info("Loading features from cached file %s", snake_case__)
        lowerCAmelCase: Optional[Any] = torch.load(snake_case__)
        lowerCAmelCase: Optional[int] = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        lowerCAmelCase: int = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            lowerCAmelCase: Tuple = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            lowerCAmelCase: Optional[Any] = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        lowerCAmelCase: Optional[int] = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(snake_case__, snake_case__, snake_case__, snake_case__), batch_size=snake_case__)

    # NOTE(review): duplicate parameter name (SyntaxError); upstream this is
    # ``validation_step(self, batch, batch_nb)``.
    def lowercase__(self, snake_case__, snake_case__):
        """Run one validation step, collecting loss, logits and gold label ids."""
        lowerCAmelCase: str = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            lowerCAmelCase: List[Any] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        lowerCAmelCase: Any = self(**snake_case__)
        lowerCAmelCase, lowerCAmelCase: List[str] = outputs[:2]
        lowerCAmelCase: Dict = logits.detach().cpu().numpy()
        lowerCAmelCase: Optional[Any] = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def lowercase__(self, snake_case__):
        """Aggregate step outputs into mean loss plus seqeval metrics."""
        lowerCAmelCase: List[str] = torch.stack([x["val_loss"] for x in outputs]).mean()
        lowerCAmelCase: Optional[Any] = np.concatenate([x["pred"] for x in outputs], axis=0)
        lowerCAmelCase: List[Any] = np.argmax(snake_case__, axis=2)
        lowerCAmelCase: Optional[int] = np.concatenate([x["target"] for x in outputs], axis=0)
        lowerCAmelCase: str = dict(enumerate(self.labels))
        lowerCAmelCase: Optional[int] = [[] for _ in range(out_label_ids.shape[0])]
        lowerCAmelCase: Dict = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                # Padding positions are excluded from the metric computation.
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        lowerCAmelCase: Dict = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(snake_case__, snake_case__),
            "precision": precision_score(snake_case__, snake_case__),
            "recall": recall_score(snake_case__, snake_case__),
            # NOTE(review): ``fa_score`` mirrors the mangled import at the top
            # of this section; seqeval's function is ``f1_score`` — verify.
            "f1": fa_score(snake_case__, snake_case__),
        }
        lowerCAmelCase: int = dict(results.items())
        lowerCAmelCase: Tuple = results
        return ret, preds_list, out_label_list

    def lowercase__(self, snake_case__):
        """Reduce validation outputs into the dict Lightning expects."""
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase: Dict = self._eval_end(snake_case__)
        lowerCAmelCase: int = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def lowercase__(self, snake_case__):
        """Reduce test outputs into the dict Lightning expects."""
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase: int = self._eval_end(snake_case__)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        lowerCAmelCase: str = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    # NOTE(review): duplicate parameter name (SyntaxError); upstream this is
    # ``add_model_specific_args(parser, root_dir)`` and ``parser`` below is
    # otherwise unbound.
    def lowercase__(snake_case__, snake_case__):
        """Extend the argument parser with token-classification options."""
        BaseTransformer.add_model_specific_args(snake_case__, snake_case__)
        parser.add_argument(
            "--task_type", default="NER", type=snake_case__, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=snake_case__, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--labels", default="", type=snake_case__, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", )
        parser.add_argument(
            "--gpus", default=0, type=snake_case__, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
if __name__ == "__main__":
    # The mechanically renamed original assigned every value to the single
    # name ``lowerCAmelCase__`` while still reading the old names (``parser``,
    # ``args``, ``model``, ``trainer``, ``checkpoints``, ``NERTransformer``),
    # so this block raised NameError immediately. Restored with consistent
    # local names; the Lightning module defined above is now called
    # ``SCREAMING_SNAKE_CASE__``.
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = SCREAMING_SNAKE_CASE__.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = SCREAMING_SNAKE_CASE__(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(
            glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)
        )
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 108 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowerCAmelCase:
    """Differentiable CLIP preprocessor.

    Tokenizes text with CLIPTokenizerFast and preprocesses image tensors with
    torchvision transforms (resize 224, center-crop 224, normalize with
    CLIP's channel statistics) so gradients can flow through the image path.

    Restored from a mangled original that (a) gave both ``__init__``
    parameters the same name (a SyntaxError), (b) bound every attribute to a
    throwaway local instead of ``self``, and (c) discarded the preprocessed
    pixel values in ``__call__``. NOTE(review): call sites refer to this class
    as ``ProcessorGradientFlow`` (its original name), but the class directly
    below is also named ``__lowerCAmelCase`` and shadows this one at module
    level — that rename still needs fixing.
    """

    def __init__(self, device="cpu", processor_name="openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(processor_name)
        # CLIP's published per-channel mean/std.
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize, center-crop and normalize a batch of image tensors."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize ``text``, preprocess ``images`` and move everything to the device."""
        encoding = self.tokenizer(text=text, **kwargs)
        # Store the pixels under the key CLIP expects (the original dropped them).
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class __lowerCAmelCase(nn.Module):
    """VQGAN+CLIP image editor: optimises a latent offset so the decoded image
    matches positive text prompts and avoids negative ones.

    NOTE(review): mechanical renaming broke this class — ``__init__`` and most
    methods repeat the parameter name ``__magic_name__`` (a SyntaxError),
    nearly all ``self.`` attribute assignments were turned into throwaway
    locals named ``snake_case_`` (the names each body later reads —
    ``device``, ``vqgan``, ``clip``, ``output_path`` etc. — are therefore
    unbound), and it instantiates ``ProcessorGradientFlow``, the original name
    of the class above, which is now also called ``__lowerCAmelCase`` and is
    shadowed by this one. Code kept byte-for-byte; comments flag the breaks.
    """

    # NOTE(review): every ``__magic_name__`` parameter shares one name.
    # Upstream order: iterations, lr, vqgan, vqgan_config, vqgan_checkpoint,
    # clip, clip_preprocessor, device, log, ... — verify against upstream.
    def __init__(self, __magic_name__=10, __magic_name__=0.01, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=False, __magic_name__=True, __magic_name__="image", __magic_name__=True, __magic_name__=False, __magic_name__=False, __magic_name__=False, ) -> None:
        """Load/attach the VQGAN and CLIP models and record optimisation settings."""
        super().__init__()
        snake_case_: Dict = None
        snake_case_: List[Any] = device if device else get_device()
        if vqgan:
            snake_case_: List[Any] = vqgan
        else:
            snake_case_: Dict = load_vqgan(self.device, conf_path=__magic_name__, ckpt_path=__magic_name__)
        self.vqgan.eval()
        if clip:
            snake_case_: int = clip
        else:
            snake_case_: List[str] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''')
        self.clip.to(self.device)
        snake_case_: str = ProcessorGradientFlow(device=self.device)
        snake_case_: Optional[Any] = iterations
        snake_case_: str = lr
        snake_case_: Union[str, Any] = log
        snake_case_: Tuple = make_grid
        snake_case_: Optional[Any] = return_val
        snake_case_: Union[str, Any] = quantize
        snake_case_: Optional[Any] = self.vqgan.decoder.z_shape

    def lowerCamelCase(self, __magic_name__=None, __magic_name__=None, __magic_name__=5, __magic_name__=True) -> Tuple:
        """Stitch the intermediate frames saved during generation into a gif."""
        snake_case_: Any = []
        if output_path is None:
            snake_case_: List[Any] = '''./animation.gif'''
        if input_path is None:
            snake_case_: Dict = self.save_path
        snake_case_: Tuple = sorted(glob(input_path + '''/*'''))
        if not len(__magic_name__):
            raise ValueError(
                '''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
                ''' function?)''')
        if len(__magic_name__) == 1:
            print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''')
        snake_case_: Union[str, Any] = total_duration / len(__magic_name__)
        snake_case_: str = [frame_duration] * len(__magic_name__)
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            snake_case_: int = 1.5
            snake_case_: Dict = 3
        for file_name in paths:
            if file_name.endswith('''.png'''):
                images.append(imageio.imread(__magic_name__))
        imageio.mimsave(__magic_name__, __magic_name__, duration=__magic_name__)
        print(F'''gif saved to {output_path}''')

    def lowerCamelCase(self, __magic_name__=None, __magic_name__=None) -> str:
        """Encode an image loaded from disk into the VQGAN latent space."""
        if not (path or img):
            raise ValueError('''Input either path or tensor''')
        if img is not None:
            # Direct-tensor input is not supported yet; only file paths are.
            raise NotImplementedError
        snake_case_: Any = preprocess(Image.open(__magic_name__), target_image_size=256).to(self.device)
        snake_case_: Union[str, Any] = preprocess_vqgan(__magic_name__)
        snake_case_: Optional[Any] = self.vqgan.encode(__magic_name__)
        return z

    def lowerCamelCase(self, __magic_name__) -> int:
        """Apply a transform vector to the current latent and decode an image."""
        snake_case_: Any = self.latent.detach().requires_grad_()
        snake_case_: Optional[int] = base_latent + transform_vector
        if self.quantize:
            snake_case_: Dict = self.vqgan.quantize(__magic_name__)
        else:
            snake_case_: int = trans_latent
        return self.vqgan.decode(__magic_name__)

    def lowerCamelCase(self, __magic_name__, __magic_name__, __magic_name__=None) -> Tuple:
        """Score CLIP similarity between prompts and an image, optionally weighted."""
        snake_case_: Optional[int] = self.clip_preprocessor(text=__magic_name__, images=__magic_name__, return_tensors='''pt''', padding=__magic_name__)
        snake_case_: Any = self.clip(**__magic_name__)
        snake_case_: List[str] = clip_outputs.logits_per_image
        if weights is not None:
            snake_case_: Dict = similarity_logits * weights
        return similarity_logits.sum()

    def lowerCamelCase(self, __magic_name__, __magic_name__, __magic_name__) -> Tuple:
        """Contrastive loss: -log(positive similarity) + log(negative similarity)."""
        snake_case_: int = self._get_clip_similarity(pos_prompts['''prompts'''], __magic_name__, weights=(1 / pos_prompts['''weights''']))
        if neg_prompts:
            snake_case_: str = self._get_clip_similarity(neg_prompts['''prompts'''], __magic_name__, weights=neg_prompts['''weights'''])
        else:
            # No negatives: log(1) == 0, so only the positive term contributes.
            snake_case_: Optional[Any] = torch.tensor([1], device=self.device)
        snake_case_: Optional[int] = -torch.log(__magic_name__) + torch.log(__magic_name__)
        return loss

    def lowerCamelCase(self, __magic_name__, __magic_name__, __magic_name__) -> int:
        """Gradient-descend a latent offset against the CLIP loss, yielding a frame per step."""
        snake_case_: str = torch.randn_like(self.latent, requires_grad=__magic_name__, device=self.device)
        snake_case_: Tuple = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            snake_case_: List[Any] = self._add_vector(__magic_name__)
            snake_case_: int = loop_post_process(__magic_name__)
            snake_case_: Union[str, Any] = self._get_CLIP_loss(__magic_name__, __magic_name__, __magic_name__)
            print('''CLIP loss''', __magic_name__)
            if self.log:
                wandb.log({'''CLIP Loss''': clip_loss})
            clip_loss.backward(retain_graph=__magic_name__)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def lowerCamelCase(self, __magic_name__, __magic_name__, __magic_name__) -> int:
        """Start a wandb run and record prompts/config (and the source image, if any)."""
        wandb.init(reinit=__magic_name__, project='''face-editor''')
        wandb.config.update({'''Positive Prompts''': positive_prompts})
        wandb.config.update({'''Negative Prompts''': negative_prompts})
        wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations})
        if image_path:
            snake_case_: Dict = Image.open(__magic_name__)
            snake_case_: int = image.resize((256, 256))
            # NOTE(review): wandb.log takes a dict, e.g.
            # wandb.log({"Original Image": wandb.Image(...)}) — this positional
            # call looks wrong; verify.
            wandb.log('''Original Image''', wandb.Image(__magic_name__))

    def lowerCamelCase(self, __magic_name__) -> Tuple:
        """Normalise prompts ("a|b", "text:weight", (text, weight)) into prompt/weight tensors."""
        if not prompts:
            return []
        snake_case_: str = []
        snake_case_: str = []
        if isinstance(__magic_name__, __magic_name__):
            snake_case_: Union[str, Any] = [prompt.strip() for prompt in prompts.split('''|''')]
        for prompt in prompts:
            if isinstance(__magic_name__, (tuple, list)):
                snake_case_: Tuple = prompt[0]
                snake_case_: Any = float(prompt[1])
            elif ":" in prompt:
                # "text:weight" form.
                snake_case_: Union[str, Any] = prompt.split(''':''')
                snake_case_: int = float(__magic_name__)
            else:
                snake_case_: str = prompt
                snake_case_: Dict = 1.0
            processed_prompts.append(__magic_name__)
            weights.append(__magic_name__)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(__magic_name__, device=self.device),
        }

    def lowerCamelCase(self, __magic_name__, __magic_name__=None, __magic_name__=None, __magic_name__=True, __magic_name__=False, __magic_name__=True, __magic_name__=True, __magic_name__=None, ) -> Any:
        """Main entry point: optimise the latent toward the prompts, showing/saving frames."""
        if image_path:
            snake_case_: Optional[int] = self._get_latent(__magic_name__)
        else:
            snake_case_: List[Any] = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(__magic_name__, __magic_name__, __magic_name__)
        assert pos_prompts, "You must provide at least one positive prompt."
        snake_case_: Tuple = self.process_prompts(__magic_name__)
        snake_case_: Dict = self.process_prompts(__magic_name__)
        if save_final and save_path is None:
            snake_case_: Any = os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts''']))
            if not os.path.exists(__magic_name__):
                os.makedirs(__magic_name__)
            else:
                # Directory exists already: append a timestamp to keep runs apart.
                snake_case_: Optional[int] = save_path + '''_''' + get_timestamp()
                os.makedirs(__magic_name__)
            snake_case_: str = save_path
        snake_case_: Tuple = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print('''Original Image''')
            show_pil(custom_to_pil(__magic_name__))
        snake_case_: Tuple = loop_post_process(__magic_name__)
        for iter, transformed_img in enumerate(self._optimize_CLIP(__magic_name__, __magic_name__, __magic_name__)):
            if show_intermediate:
                show_pil(__magic_name__)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}.png'''))
            if self.log:
                wandb.log({'''Image''': wandb.Image(__magic_name__)})
        if show_final:
            show_pil(__magic_name__)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}_final.png'''))
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
lowerCAmelCase_ = logging.get_logger(__name__)
# Map from model id to its hosted config URL.
# NOTE(review): this assignment rebinds the SAME name as the logger above —
# both were mechanically renamed to ``lowerCAmelCase_`` — so the logger is
# overwritten by this dict as soon as the module is imported.
lowerCAmelCase_ = {
    '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for BioGPT models (defaults match ``microsoft/biogpt``).

    Fixes from the mangled original: the base class was written as ``_a`` — a
    name bound to an unrelated dict earlier in this file, so class creation
    failed — every ``__init__`` parameter shared the name ``__magic_name__``
    (a SyntaxError), and the attribute assignments had their ``self.`` targets
    stripped. The intended base is ``PretrainedConfig`` (imported at the top
    of this section); parameter names are restored from the assignments.
    """

    # NOTE(review): upstream transformers names this attribute ``model_type``;
    # the mechanical rename to ``lowerCamelCase_`` likely breaks the
    # AutoConfig mapping — verify.
    lowerCamelCase_: Any = '''biogpt'''

    def __init__(
        self,
        vocab_size=4_2384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters and forward the special token ids to the base."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 279 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the SwiftFormer model family: nothing heavy is
# imported until an attribute is actually accessed.
_a = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modelling code requires torch; skip it silently when torch is missing.
    pass
else:
    # NOTE(review): this rebinds ``_a`` and DESTROYS the import-structure dict
    # built above — upstream this is
    # ``_import_structure["modeling_swiftformer"] = [...]``. The
    # ``_LazyModule(...)`` call at the bottom also reads ``_import_structure``,
    # which is not defined in this module after the mechanical rename.
    _a = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 209 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger, tokenizer resource names, hosted vocab locations and max
# input sizes for the GPT-SW3 checkpoints.
# NOTE(review): all four assignments below rebind the SAME name ``_a``
# (upstream: ``logger``, ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``); each one clobbers the previous,
# and the tokenizer class below still references the original constant names.
_a = logging.get_logger(__name__)
_a = {"vocab_file": "spiece.model"}
_a = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}
_a = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__(
    self,
    vocab_file,
    do_lower_case=False,
    remove_space=False,
    keep_accents=False,
    pad_token=None,
    unk_token=None,
    eos_token=None,
    bos_token=None,
    sp_model_kwargs=None,
    **kwargs,
):
    """Build the GPT-SW3 tokenizer around a SentencePiece model file.

    Fixes from the mangled original: every parameter shared the name
    ``__lowerCAmelCase`` (a SyntaxError), all ``self.`` attribute targets were
    stripped to throwaway locals, and the control-character regex mapped
    ``map(<kwargs dict>, ...)`` where ``chr`` is required. Parameter names are
    restored from the attribute reads below.
    """
    sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    name_or_path = kwargs.get('''name_or_path''')
    if name_or_path is None:
        # NOTE(review): ``logger`` — the module-level logger above was renamed
        # to ``_a`` and then clobbered; verify the logger binding.
        logger.warning(
            '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
            ''' you are testing the model, this can safely be ignored'''
        )
        name_or_path = '''None'''
    # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
    eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
    unk_token = '''<unk>''' if unk_token is None else unk_token
    if "gpt-sw3-7b" in name_or_path:
        pad_token = unk_token if pad_token is None else pad_token
        bos_token = eos_token if bos_token is None else bos_token
    else:
        pad_token = '''<pad>''' if pad_token is None else pad_token
        bos_token = '''<s>''' if bos_token is None else bos_token
    super().__init__(
        do_lower_case=do_lower_case,
        remove_space=remove_space,
        keep_accents=keep_accents,
        bos_token=bos_token,
        eos_token=eos_token,
        unk_token=unk_token,
        pad_token=pad_token,
        sp_model_kwargs=self.sp_model_kwargs,
        **kwargs,
    )
    self.do_lower_case = do_lower_case
    self.remove_space = remove_space
    self.keep_accents = keep_accents
    self.vocab_file = vocab_file
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
    self.sp_model.Load(vocab_file)
    # Used for whitespace normalization in input texts
    # fmt : off
    # NOTE(review): upstream this set holds DISTINCT Unicode whitespace code
    # points (nbsp, thin space, zero-width space, ...); they appear to have
    # been normalised to plain spaces in transit — restore from upstream.
    self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
    # fmt : on
    # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
    self.non_printing_characters_re = re.compile(
        f'[{"".join(map(chr, list(range(0, 9)) + list(range(1_1, 3_2)) + list(range(1_2_7, 1_6_0)) + [1_6_0, 1_7_3, 8_2_0_3]))}]'
    )
def __getstate__(self):
    """Return picklable state with the (unpicklable) SentencePiece processor dropped.

    The obfuscated original rebound a throwaway local instead of clearing
    ``state["sp_model"]``; ``__setstate__`` reloads the processor from
    ``self.vocab_file``, so it must be None-ed out here.
    """
    state = self.__dict__.copy()
    state["sp_model"] = None
    return state
def __setstate__(self, d):
    """Restore pickled state and re-create the SentencePiece processor.

    Parameters
    ----------
    d : dict
        The instance ``__dict__`` produced by ``__getstate__`` (with
        ``sp_model`` set to None).
    """
    self.__dict__ = d

    # for backward compatibility with pickles from before sp_model_kwargs existed
    if not hasattr(self, "sp_model_kwargs"):
        self.sp_model_kwargs = {}

    # The processor itself is not picklable, so reload it from the vocab file.
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
    self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def vocab_size(self) -> int:
    """Number of pieces in the underlying SentencePiece model."""
    return len(self.sp_model)
def preprocess_text(self, text: str) -> str:
    """Clean ``text`` before tokenization.

    Removes non-printing control characters, maps every whitespace variant in
    ``self.whitespaces`` to a plain space, and applies NFC unicode
    normalization.  (Name restored from call sites: ``self.preprocess_text``.)
    """
    text = self.non_printing_characters_re.sub("", text)

    # Normalize whitespaces
    text = "".join([char if char not in self.whitespaces else " " for char in text])

    # NFC Unicode normalization
    text = unicodedata.normalize("NFC", text)
    return text
def _tokenize(self, text: str, **kwargs):
    """Tokenize ``text`` into SentencePiece string pieces.

    The obfuscated original passed the placeholder as ``out_type``;
    ``out_type=str`` is required so pieces (not ids) are returned.
    """
    text = self.preprocess_text(text)
    return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token: str) -> int:
    """Map a SentencePiece piece (str) to its vocabulary id."""
    return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index: int) -> str:
    """Map a vocabulary id to its SentencePiece piece (str)."""
    return self.sp_model.IdToPiece(index)
@staticmethod
def clean_up_tokenization(out_string: str) -> str:
    """Return the decoded string unchanged (cleanup is intentionally a no-op
    for this tokenizer; the base-class default would mangle spacing)."""
    return out_string
def convert_tokens_to_string(self, tokens) -> str:
    """Join a list of pieces back into a string.

    Special tokens are emitted verbatim; runs of ordinary pieces between them
    are decoded with the SentencePiece model.  The obfuscated original
    collapsed ``current_sub_tokens`` / ``prev_is_special`` into one
    placeholder; reconstructed from the surviving control flow.
    """
    current_sub_tokens = []
    out_string = ""
    prev_is_special = False
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
            if not prev_is_special:
                out_string += " "

            out_string += self.sp_model.decode(current_sub_tokens) + token
            prev_is_special = True
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
            prev_is_special = False

    # flush the trailing run of ordinary pieces
    out_string += self.sp_model.decode(current_sub_tokens)
    return out_string
def get_vocab(self):
    """Return the full vocabulary mapping ``token -> id``, including tokens
    added on top of the SentencePiece model."""
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
    vocab.update(self.added_tokens_encoder)
    return vocab
def save_vocabulary(self, save_directory, filename_prefix=None):
    """Write the SentencePiece model file into ``save_directory``.

    Parameters
    ----------
    save_directory : str
        Existing directory to save into (error is logged otherwise).
    filename_prefix : str, optional
        Prepended (with ``-``) to the standard vocab filename.

    Returns
    -------
    tuple[str] with the written path, or None if ``save_directory`` is invalid.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )

    # Copy the original file when we have one on disk; otherwise serialize the
    # in-memory model.
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        with open(out_vocab_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

    return (out_vocab_file,)
def encode_fast(self, text, return_tensors=False):
    """Encode a string or list of strings directly with SentencePiece,
    bypassing the slower HF tokenization pipeline.

    Parameters
    ----------
    text : str | list[str]
    return_tensors : bool | str
        If True or "pt", the ids are wrapped in a ``torch.tensor``.
    """
    if isinstance(text, str):
        text = self.preprocess_text(text)
        token_ids = self.sp_model.encode(text)
    else:
        text = [self.preprocess_text(t) for t in text]
        token_ids = self.sp_model.encode(text)

    if return_tensors is True or return_tensors == "pt":
        token_ids = torch.tensor(token_ids)

    return token_ids
def decode_fast(self, token_ids) -> str:
    """Decode ids straight through the SentencePiece model (no special-token
    handling), the inverse of :meth:`encode_fast`."""
    return self.sp_model.decode(token_ids)
def _build_conversation_input_ids(self, conversation):
    """Serialize a chat ``conversation`` into model input ids.

    Each turn becomes ``User: ...`` or ``Bot: ...``; turns are joined with the
    BOS token, the whole prompt is prefixed with EOS+BOS and suffixed with a
    ``Bot:`` cue for generation.
    """
    all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
    prompt = (
        f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
    )
    return self.encode(text=prompt)
| 209 | 1 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime, by trial division up to sqrt(number).

    The obfuscated original assigned the result to a dead placeholder and then
    returned the undefined name ``status``; every call site in this file uses
    the name ``is_prime``, both restored here.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to ``n`` inclusive.

    Placeholder assignments in the obfuscated original lost the working list
    and the zeroing of composite entries; reconstructed from the algorithm.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving entry
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n] by direct primality testing.

    Name restored from the call site in ``goldbach``.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list (with
    multiplicity); 0 and 1 factor to [0] and [1] respectively.

    Name restored from the many call sites (greatest/smallest_prime_factor,
    kg_v); the placeholder assignments lost ``ans``/``factor``/``quotient``.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # NOTE: true division keeps the historical behavior (quotient
                # becomes a float); comparisons against 1 still terminate.
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True iff ``number`` is even.  Name restored from the call site
    in ``goldbach``."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Goldbach: return two primes whose sum equals the even ``number`` > 2.

    The placeholder assignments lost the loop counters ``i``/``j`` and the
    ``loop`` sentinel; reconstructed from the surviving control flow.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm.

    The obfuscated original collapsed both parameters into one name (a
    SyntaxError) and computed ``numbera % numbera``; restored to the standard
    Euclid loop, with the name taken from the call site in
    ``simplify_fraction``.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive integers, built from their
    prime factorizations (each prime taken with its maximal multiplicity)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared prime: take it with the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes ``p_number_1`` and
    ``p_number_2`` (endpoints excluded)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and n), ascending.

    Name restored from the call site in ``is_perfect_number``.
    """
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce ``numerator/denominator`` by their greatest common divisor and
    return the simplified pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! (with factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number of this module's convention
    (fib(0) == fib(1) == fib(2) == 1, fib(3) == 2, ...)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    fib_1 = 0
    fib_2 = 1  # kept for parity with the original (unused after init)
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
| 125 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger (the placeholder name `UpperCamelCase` was later
# clobbered by the argparse variables; restore the conventional name).
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build (old_name, new_name) pairs mapping original TrOCR/DeiT encoder
    weight names to HuggingFace ViT encoder names.

    Parameters
    ----------
    encoder_config : object with ``num_hidden_layers``
    decoder_config : unused here; kept for signature parity with the caller.

    Returns
    -------
    list[tuple[str, str]] -- 10 per-layer entries per encoder layer plus 6
    embedding/layernorm entries.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    """Split each fused qkv projection weight into separate query/key/value
    entries in ``state_dict`` (in place).

    The obfuscated original popped the fused weight but assigned all three
    slices to a dead placeholder; the HF target keys are restored here.
    """
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        # fused layout is [q; k; v] along dim 0, each hidden_size rows
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    """Download a sample image matching the checkpoint type (handwritten IAM
    sample vs. printed SROIE receipt) and return it as an RGB PIL image.

    Network I/O: fetches the image over HTTP via ``requests``.
    """
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original TrOCR checkpoint's weights into an HF
    ``VisionEncoderDecoderModel`` and save model + processor.

    The obfuscated original collapsed every local (configs, model parts,
    state dict, processor, logits, expected slices) into one placeholder;
    names reconstructed from the surviving right-hand sides.  Downloads the
    checkpoint and a test image, so this requires network access.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 125 | 1 |
# Conversion factors to joules for each supported energy unit.  The function
# below looks this table up under the name ENERGY_CONVERSION, which the
# obfuscation had replaced (with a bogus `str` annotation to boot).
ENERGY_CONVERSION: dict = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from one energy unit to another via joules.

    Raises
    ------
    ValueError
        If either unit is not a key of ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Module-level logger; the function below logs through the name `logger`,
# which the obfuscation had replaced with a placeholder.
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract the target warnings from one CI artifact.

    Parameters
    ----------
    artifact_path : str
        Either a directory containing ``warnings.txt`` (GitHub-actions mode,
        controlled by the module-level ``from_gh`` flag) or a ``.zip`` file.
    targets : list[str]
        Warning class names to keep (e.g. ``DeprecationWarning``).

    Returns
    -------
    set[str] of multi-line warning bodies.

    Note: the obfuscated original had two parameters with the same name (a
    SyntaxError) and lost the ``warning``/``line`` locals; reconstructed from
    the surviving logic.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate indented continuation lines into `buffer`; flush a
        # complete warning whenever a non-indented line is seen.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract the target warnings from every artifact under ``artifact_dir``.

    Zip files are scanned always; plain directories only when the
    module-level ``from_gh`` flag is set.
    """
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
def UpperCAmelCase ( _lowerCamelCase ):
return values.split("," )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__SCREAMING_SNAKE_CASE = extract_warnings(args.output_dir, args.targets)
__SCREAMING_SNAKE_CASE = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 256 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 256 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Convolution + batch norm + optional activation: the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # `padding=kernel_size // 2` keeps the spatial resolution unchanged for stride 1.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `activation=None` disables the non-linearity (used for the last conv of a residual branch).
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single strided 3x3 convolution applied to the input pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        # Fail fast with a clear message when the input channel count doesn't match the config.
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 strided convolution + batch norm, used to project residual inputs to the right shape."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: rescales channels by a learned, input-dependent attention."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # 1x1 conv bottleneck producing per-channel gates in (0, 1) via the final sigmoid.
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck block with a grouped 3x3 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # no activation on the last conv: it is applied after the residual addition below
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation stage inserted."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            # no activation on the last conv: it is applied after the residual addition below
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: `depth` stacked X or Y layers; only the first layer may downsample."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config,
                in_channels,
                out_channels,
                stride=stride,
            ),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Runs the input through all RegNet stages, optionally collecting per-stage hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        # `hidden_states` accumulates the input of each stage plus the final output.
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convs (fan_out, relu) and constant init for norm layers,
        # following the original RegNet/ResNet initialization scheme.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the original assignment target was obfuscated; this follows the
        # standard transformers pattern of toggling `gradient_checkpointing` on the model.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
# Module-level docstrings injected into the model classes via the `add_start_docstrings` decorators below.
REGNET_START_DOCSTRING = r"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        # Fall back to the config defaults when the caller doesn't override them.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/num_labels, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 23 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    """Builds a tiny ESMFold config and random inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): `is_folding_model` was obfuscated in the original; ESMFold requires it to be True.
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ESMFold; most mixin tests are skipped because ESMFold's outputs differ."""

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        """Smoke-test folding inference against known reference position values."""
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        # NOTE(review): the dtype was obfuscated as `floataa`; float64 matches the high-precision comparison here.
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 235 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that lazily applies a preprocessing function to each underlying item."""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        # `process` is called as process(item, **params) on every access.
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """
    Iterate over `loader`, applying `infer(item, **params)` to each element. When
    `loader_batch_size` is set, model batches are unrolled and yielded one item at a time.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the item at `self._loader_batch_index` within the current unrolled batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class __a ( __SCREAMING_SNAKE_CASE ):
    """Pipeline iterator that flattens "one item -> many sub-items".

    Each element produced by ``infer`` is itself an iterator; its elements are
    yielded one at a time, so lists of lists become a single flat stream.

    Fix: the original ``__init__`` declared four parameters all named
    ``SCREAMING_SNAKE_CASE`` (a SyntaxError) and forwarded the unbound name
    ``UpperCamelCase__``; ``__iter__`` never assigned ``self.iterator`` /
    ``self.subiterator``.
    """

    def __init__( self : Dict , loader : Union[str, Any] , infer : Dict , params : Dict , loader_batch_size : str = None ):
        """Delegate storage of loader / inference callable / params to the base
        class; ``loader_batch_size`` is accepted for API parity but unused."""
        super().__init__(loader , infer , params )

    def __iter__( self : Optional[int] ):
        """Reset iteration state and return self."""
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __lowercase ( self : List[Any] ):
        """Return the next sub-item, starting a new sub-iterator whenever the
        current one is exhausted."""
        if self.subiterator is None:
            # No sub-iterator started yet: infer on the first loader item.
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class __a ( __SCREAMING_SNAKE_CASE ):
    """Pipeline iterator that packs consecutive items into one list.

    Items are accumulated (after popping their ``is_last`` marker) until an
    item flagged ``is_last`` is seen; the accumulated list is then returned.

    Fix: ``item``, ``is_last``, ``accumulator``, ``processed``, ``key``,
    ``first_tensor`` and ``observed_batch_size`` were all mangled to
    ``UpperCamelCase__`` and therefore read while unbound, and the batch
    unrolling state was never written back onto ``self``.
    """

    def __iter__( self : Optional[int] ):
        """Reset the loader iterator and return self."""
        self.iterator = iter(self.loader )
        return self

    def __lowercase ( self : List[Any] ):
        """Return the next packed group of items."""
        is_last = False
        accumulator = []
        # First drain whatever remains of a partially unrolled batch.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # Infer the observed batch size from the first tensor/list.
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator
class __a ( __SCREAMING_SNAKE_CASE ):
    """Dataset wrapper that exposes a single column (``key``) of each row.

    Fix: both ``__init__`` parameters were named ``SCREAMING_SNAKE_CASE``
    (a SyntaxError) and ``__getitem__`` read the undefined name ``i``.
    """

    def __init__( self : Union[str, Any] , dataset : Dataset , key : str ):
        self.dataset = dataset
        self.key = key

    def __len__( self : Optional[Any] ):
        return len(self.dataset )

    def __getitem__( self : Optional[int] , i : Dict ):
        # Return only the configured column of row ``i``.
        return self.dataset[i][self.key]
class __a ( __SCREAMING_SNAKE_CASE ):
    """Dataset wrapper exposing two columns per row as a text / text_pair dict.

    Fix: the original declared three identically named parameters (a
    SyntaxError), read unbound names in ``__init__``, and used the same key
    for both ``text`` and ``text_pair``.
    """

    def __init__( self : Tuple , dataset : Dataset , keya : str , keyb : str ):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__( self : Tuple ):
        return len(self.dataset )

    def __getitem__( self : Optional[Any] , i : Any ):
        # Pair the two configured columns in the format expected by
        # sequence-pair tokenization.
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
import re
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
    """Return True if the argument is a valid Sri Lankan mobile number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by ``7``,
    a valid operator digit, an optional ``-``/space separator and 7 digits.

    Fix: the compiled pattern was previously discarded and the input string
    was searched against itself; the input is now matched against the pattern.
    """
    pattern = re.compile(
        R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
    return bool(pattern.search(__lowerCAmelCase ) )
if __name__ == "__main__":
    # Demo run. Fix: the original bound the sample number to a throwaway name
    # and then called the undefined ``is_sri_lankan_phone_number(phone)``.
    phone = "0094702343221"
    print(SCREAMING_SNAKE_CASE(phone))
from __future__ import annotations
class A__:
    """Simple XOR cipher: every character is XOR-ed with an integer key.

    A key of 0/None falls back to the key given at construction time, and
    failing that to 1. Encrypting twice with the same key is the identity.

    Fixes: the constructor previously bound the key to a throwaway local (so
    it was lost), all six methods shared one mangled name (each shadowing the
    previous), and every body read names (``lowercase_``) that were never
    bound.
    """

    def __init__( self , _lowercase = 0 ) -> None:
        # Store the default key (private; 0 means "no default").
        self.__key = _lowercase

    def encrypt( self , content , key ):
        """XOR-encrypt ``content`` and return the result as a list of chars."""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt( self , content , key ):
        """XOR-decrypt ``content`` (XOR is its own inverse)."""
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string( self , content , key = 0 ):
        """XOR-encrypt ``content`` and return the result as a string."""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string( self , content , key = 0 ):
        """XOR-decrypt ``content`` and return the result as a string."""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ''''''
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file( self , file , key = 0 ):
        """Encrypt ``file`` line by line into ``encrypt.out``; return success."""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file( self , file , key ):
        """Decrypt ``file`` line by line into ``decrypt.out``; return success."""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 248 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger for this file. Note that ``logging`` here is the
# project's logging helper imported above, not the stdlib module.
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
class UpperCAmelCase ( lowerCamelCase__ ):
    """Image processor: resize / center-crop / rescale / normalize, plus
    post-processing of semantic-segmentation logits.

    Fixes: every method signature declared several parameters all named ``A``
    (a SyntaxError), bodies read names (``lowercase__``, ``size`` …) that were
    never bound, and all six public methods shared one mangled name even
    though ``preprocess`` calls ``self.resize`` / ``self.center_crop`` /
    ``self.rescale`` / ``self.normalize``. Canonical names and parameter
    bindings are restored.
    """

    # NOTE(review): presumably maps to the base class's ``model_input_names``
    # contract — confirm against the (unseen) base class.
    snake_case_ = ['pixel_values']

    def __init__( self : Tuple , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : List[Any] , ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_56}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self : Any , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Dict , ):
        """Resize so the image's shortest edge matches ``size['shortest_edge']``."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self : Dict , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ):
        """Center-crop to ``size['height'] x size['width']``."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self : List[str] , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int] ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self : List[Any] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[Any] , ):
        """Normalize with the given per-channel mean and std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self : Optional[int] , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs : Dict , ):
        """Run the configured transformations; per-call arguments override the
        instance defaults. Returns a ``BatchFeature`` with ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )

    def post_process_semantic_segmentation( self : Optional[int] , outputs : Union[str, Any] , target_sizes : List[Tuple] = None ):
        """Convert model ``logits`` into per-image semantic-segmentation maps,
        optionally resizing each map to the corresponding target size."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 358 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase ( TaskTemplate ):
    """Task template for automatic speech recognition.

    Fixes: ``frozen`` was set to the undefined ``__SCREAMING_SNAKE_CASE`` (as
    was the base class, although ``TaskTemplate`` is imported above); all five
    class attributes shared one mangled name even though the methods read
    ``self.audio_column`` / ``self.input_schema`` / ``self.transcription_column``;
    and the alignment method read unbound names (``features``, ``input_schema``)
    and compared against the parameter instead of ``Audio``.
    """

    # Task identity and default input/label schema.
    task: str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"transcription": Value("string" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features( self : Optional[int] , features : Union[str, Any] ):
        """Return a copy of this template whose input schema uses the dataset's
        actual ``Audio`` feature for the audio column."""
        if self.audio_column not in features:
            raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Bypass the frozen-dataclass setattr guard.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping( self : int ):
        """Map dataset column names to the task's canonical column names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Type variables for the generic LRU-cache classes below.
# Fix: both were previously bound to a throwaway mangled name, leaving the
# ``T`` / ``U`` referenced by ``Generic[T, U]`` undefined.
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode ( Generic[T, U] ):
    """One node of the doubly linked list backing the LRU cache.

    Fixes: both ``__init__`` parameters were named ``_A`` (a SyntaxError) and
    the body read the unbound names ``key`` / ``val``. The class is renamed to
    ``DoubleLinkedListNode`` because the sibling list/cache classes instantiate
    it under that name and all three classes previously collided on one name.
    """

    def __init__(self , key : T | None , val : U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )
class DoubleLinkedList ( Generic[T, U] ):
    """Doubly linked list with sentinel ``head``/``rear`` nodes; the node next
    to ``rear`` is the most recently used.

    Fixes: ``__init__`` instantiated nodes with the unbound name ``_A``
    instead of ``None``; both methods were mangled to one name (``_lowercase``)
    although the cache calls them as ``add``/``remove``; and the bodies read
    unbound names (``node``, ``previous``, ``rep``).
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self , node : DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (MRU position)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self , node : DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink ``node`` and return it, or None when it is not linked in."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache ( Generic[T, U] ):
    """Least-recently-used cache backed by a dict plus a doubly linked list.

    Fixes: the class-level registry was bound to a mangled name although the
    decorator body reads ``decorator_function_to_instance_map``; ``put``
    declared two parameters both named ``_A`` (a SyntaxError); ``get`` added
    the *key* back into the list instead of the node; and the decorator built
    ``LRUCache(args_tuple)`` instead of using the configured size.
    """

    # Registry shared by the ``decorator`` classmethod: decorated function ->
    # its dedicated cache instance.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self , capacity : int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self , key : T) -> bool:
        return key in self.cache

    def get(self , key : T) -> U | None:
        """Return the cached value (marking it most recently used) or None."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self , key : T , value : U) -> None:
        """Insert/update ``key``; evict the least recently used on overflow."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls , size : int = 1_28) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory adding LRU caching to single-argument functions."""
        def cache_decorator_inner(func : Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args : T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0] , result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper , 'cache_info' , cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 172 | """simple docstring"""
def __UpperCAmelCase ( a : int , b : int ) -> int:
    '''Return the greatest common divisor of ``a`` and ``b`` (Euclid).

    Fixes: both parameters shared one mangled name (a SyntaxError) and both
    loop targets collapsed onto one name, so ``a`` never changed and the loop
    could not terminate.
    '''
    while a != 0:
        a , b = b % a, a
    return b


# Readable alias: the modular-inverse helper in this file refers to ``gcd``.
gcd = __UpperCAmelCase
def __UpperCAmelCase ( a : int , m : int ) -> int:
    '''Return the modular inverse of ``a`` modulo ``m`` (extended Euclid).

    Raises ValueError when ``a`` and ``m`` are not coprime.

    Fixes: duplicate parameter names (a SyntaxError); the call to the
    undefined ``gcd`` (now taken from ``math`` so this function stands alone);
    and the six Euclid state variables, which were all collapsed onto one
    mangled name.
    '''
    from math import gcd  # local import keeps the function self-contained

    if gcd(a , m ) != 1:
        msg = F"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m


# Readable alias for callers.
mod_inverse = __UpperCAmelCase
| 172 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Metric metadata strings. Fix: all three constants were previously bound to
# the same mangled name, so only the last survived and the `_DESCRIPTION` /
# `_KWARGS_DESCRIPTION` names used by the metric class below were undefined.
_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    \"accuracy\": Accuracy
    \"f1\": F1 score
    \"pearson\": Pearson Correlation
    \"spearmanr\": Spearman Correlation
    \"matthews_correlation\": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def lowerCAmelCase__ ( preds , labels ):
    """Fraction of positions where ``preds`` equals ``labels``.

    Both arguments must support elementwise ``==`` with ``.mean()`` on the
    result (e.g. numpy arrays).

    Fix: both parameters shared one mangled name (a SyntaxError) and the body
    read the unbound names ``preds`` / ``labels``.
    """
    return float((preds == labels).mean() )


# Readable alias matching the name the metric class expects.
simple_accuracy = lowerCAmelCase__
def lowerCAmelCase__ ( preds , labels ):
    """Accuracy and F1 score for (binary) predictions.

    Fix: duplicate parameter names (a SyntaxError) and calls through unbound
    names. Accuracy is computed inline so this function does not depend on the
    sibling helper, whose module-level name is unreliable in this file.
    """
    acc = float((preds == labels).mean() )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }


# Readable alias matching the name the metric class expects.
acc_and_fa = lowerCAmelCase__
def lowerCAmelCase__ ( preds , labels ):
    """Pearson and Spearman correlation between predictions and labels.

    Fix: duplicate parameter names (a SyntaxError); the correlation calls now
    use the bound parameters instead of unbound mangled names.
    """
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


# Readable alias matching the name the metric class expects.
pearson_and_spearman = lowerCAmelCase__
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    """GLUE metric: dispatches to the right score(s) for each GLUE subset.

    Fixes: both methods were mangled to one name (the second shadowed the
    first) while ``datasets.Metric`` requires ``_info``/``_compute``; each
    ``_compute`` signature declared two parameters with the same name (a
    SyntaxError); and the body called helper names that are undefined at
    module level in this file — the computations are inlined instead.
    """

    def _info( self ):
        """Validate the configuration name and declare the metric schema."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )

    def _compute( self , predictions , references ):
        """Compute the subset-specific score(s)."""
        def _accuracy( preds , labels ):
            # Inline helper: fraction of exact matches.
            return float((preds == labels).mean() )

        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return {
                "pearson": float(pearsonr(predictions , references )[0] ),
                "spearmanr": float(spearmanr(predictions , references )[0] ),
            }
        elif self.config_name in ["mrpc", "qqp"]:
            return {
                "accuracy": _accuracy(predictions , references ),
                "f1": float(fa_score(y_true=references , y_pred=predictions ) ),
            }
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": _accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 351 | """simple docstring"""
import os
import sys
import unittest
# Repository root: three directory levels above this test file.
# Fix: the path was bound to a mangled name while the next line read the
# unbound ``git_repo_path``, and all template constants shared one name.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

# Expected shapes of the generated dummy objects (mirrors check_dummies).
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class lowerCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the repository's ``check_dummies`` utility.

    Fixes: all four test methods were mangled to one name (so only the last
    was ever collected), and every assertion read locals (``objects``,
    ``dummy_constant`` …) that were never bound because each assignment went
    to the same mangled name.
    """

    def test_find_backend( self ):
        """``find_backend`` pulls the backend name(s) out of an import-guard line."""
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
        self.assertIsNone(no_backend )

        simple_backend = find_backend(' if not is_tokenizers_available():' )
        self.assertEqual(simple_backend , 'tokenizers' )

        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():' )
        self.assertEqual(backend_with_underscore , 'tensorflow_text' )

        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
        self.assertEqual(double_backend , 'sentencepiece_and_tokenizers' )

        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
        self.assertEqual(double_backend_with_underscore , 'sentencepiece_and_tensorflow_text' )

        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
        self.assertEqual(triple_backend , 'sentencepiece_and_tokenizers_and_vision' )

    def test_read_init( self ):
        """``read_init`` maps each backend to the objects requiring it."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('tensorflow_text' , objects )
        self.assertIn('sentencepiece_and_tokenizers' , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertModel' , objects['tf'] )
        self.assertIn('FlaxBertModel' , objects['flax'] )
        self.assertIn('BertModel' , objects['torch'] )
        self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
        self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )

    def test_create_dummy_object( self ):
        """``create_dummy_object`` renders constants, functions and classes."""
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )

        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files( self ):
        """``create_dummy_files`` renders a complete dummy module per backend."""
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
| 149 | 0 |
from maths.prime_check import is_prime
def UpperCamelCase( number : int ):
    """Return ``number + 2`` if ``number`` and ``number + 2`` are twin primes,
    else -1.

    Raises TypeError for non-integer input.

    Fixes: the type check compared the argument against itself
    (``isinstance(x, x)``) and the body read the unbound name ``number``.
    """
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 103 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 148 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a:
    """Builds a tiny SwinvaConfig plus dummy pixel inputs/labels and runs the
    per-head forward checks used by the Swin v2 test suite.

    NOTE(review): identifiers in this file were auto-mangled. All constructor
    parameters are `_SCREAMING_SNAKE_CASE`, the assignments below bind bare
    locals (e.g. `UpperCAmelCase_ ... = parent`) instead of `self.parent`,
    every method is named `a__` (so later defs shadow earlier ones), and
    bodies read names (`config`, `pixel_values`, `labels`, `result`, ...)
    that are never assigned. Restore the original identifiers before running.
    """

    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=[1, 2, 1] ,_SCREAMING_SNAKE_CASE=[2, 2, 4] ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=2.0 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=8 ,) -> List[str]:
        # Store all model/test hyper-parameters on the tester instance.
        UpperCAmelCase_ : Dict = parent
        UpperCAmelCase_ : Optional[int] = batch_size
        UpperCAmelCase_ : Dict = image_size
        UpperCAmelCase_ : Optional[Any] = patch_size
        UpperCAmelCase_ : Tuple = num_channels
        UpperCAmelCase_ : Optional[int] = embed_dim
        UpperCAmelCase_ : str = depths
        UpperCAmelCase_ : Optional[Any] = num_heads
        UpperCAmelCase_ : Any = window_size
        UpperCAmelCase_ : List[str] = mlp_ratio
        UpperCAmelCase_ : Union[str, Any] = qkv_bias
        UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
        UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : List[Any] = drop_path_rate
        UpperCAmelCase_ : Union[str, Any] = hidden_act
        UpperCAmelCase_ : int = use_absolute_embeddings
        UpperCAmelCase_ : List[str] = patch_norm
        UpperCAmelCase_ : Optional[int] = layer_norm_eps
        UpperCAmelCase_ : Dict = initializer_range
        UpperCAmelCase_ : Optional[int] = is_training
        UpperCAmelCase_ : int = scope
        UpperCAmelCase_ : List[Any] = use_labels
        UpperCAmelCase_ : Any = type_sequence_label_size
        UpperCAmelCase_ : Optional[int] = encoder_stride

    def a__ ( self ) -> Optional[int]:
        """Create (config, pixel_values, labels); labels only when use_labels is set."""
        UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ : Any = None
        if self.use_labels:
            UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        UpperCAmelCase_ : str = self.get_config()
        return config, pixel_values, labels

    def a__ ( self ) -> Any:
        """Return a SwinvaConfig built from the tester's hyper-parameters."""
        # NOTE(review): kwarg `path_norm=` looks like a typo for `patch_norm=`
        # — confirm against SwinvaConfig's signature.
        return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Forward the base model and check the last hidden state's shape."""
        UpperCAmelCase_ : Optional[int] = SwinvaModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
        # Sequence shrinks 4x per stage; width doubles per stage.
        UpperCAmelCase_ : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        UpperCAmelCase_ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Check masked-image-modeling logits, including a 1-channel (greyscale) run."""
        UpperCAmelCase_ : Dict = SwinvaForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        UpperCAmelCase_ : str = 1
        UpperCAmelCase_ : Dict = SwinvaForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
        """Check image-classification logits shape against the label count."""
        UpperCAmelCase_ : Any = self.type_sequence_label_size
        UpperCAmelCase_ : Dict = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def a__ ( self ) -> Tuple:
        """Return (config, inputs_dict) for the common-test mixin."""
        UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = config_and_inputs
        UpperCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
    """Common model + pipeline test suite for Swin v2 heads.

    NOTE(review): identifiers were auto-mangled — the mixin bases are the
    undefined alias `_a`, every class attribute is `lowerCAmelCase` (later
    assignments shadow earlier ones), every method is `a__`, and method
    bodies read names that are never assigned (`config`, `inputs_dict`,
    `model`, `outputs`, ...). Restore original identifiers before running.
    """

    lowerCAmelCase = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    lowerCAmelCase = (
        {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = False

    def a__ ( self ) -> List[Any]:
        """Set up the model tester and a ConfigTester for SwinvaConfig."""
        UpperCAmelCase_ : Any = SwinvaModelTester(self )
        UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,embed_dim=37 )

    def a__ ( self ) -> str:
        """Exercise the standard config round-trip checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def a__ ( self ) -> Tuple:
        """Run the base-model forward check."""
        UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )

    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def a__ ( self ) -> Dict:
        pass

    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def a__ ( self ) -> List[str]:
        pass

    def a__ ( self ) -> Tuple:
        """Input embeddings are a module; output embeddings are Linear or absent."""
        UpperCAmelCase_, UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            UpperCAmelCase_ : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE ,nn.Linear ) )

    def a__ ( self ) -> Dict:
        """forward() must take `pixel_values` as its first argument."""
        UpperCAmelCase_, UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : str = model_class(_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : List[str] = [*signature.parameters.keys()]
            UpperCAmelCase_ : List[str] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> List[str]:
        """Check attention outputs: count, window-local shape, and ordering."""
        UpperCAmelCase_, UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Dict = True
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = True
            UpperCAmelCase_ : List[str] = False
            UpperCAmelCase_ : Optional[Any] = True
            UpperCAmelCase_ : Any = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
            UpperCAmelCase_ : Any = outputs.attentions
            UpperCAmelCase_ : Any = len(self.model_tester.depths )
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            UpperCAmelCase_ : str = True
            UpperCAmelCase_ : List[str] = config.window_size**2
            UpperCAmelCase_ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
            UpperCAmelCase_ : Optional[int] = outputs.attentions
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
            UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
            # Check attention is always last and order is fine
            UpperCAmelCase_ : Any = True
            UpperCAmelCase_ : Any = True
            UpperCAmelCase_ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
            model.to(_SCREAMING_SNAKE_CASE )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
            if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
                UpperCAmelCase_ : Optional[int] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                UpperCAmelCase_ : List[str] = 2
            self.assertEqual(out_len + added_hidden_states ,len(_SCREAMING_SNAKE_CASE ) )
            UpperCAmelCase_ : str = outputs.attentions
            self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Helper: forward once and validate hidden_states and reshaped_hidden_states."""
        UpperCAmelCase_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        with torch.no_grad():
            UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
        UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
        UpperCAmelCase_ : Dict = getattr(
            self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
        # Swinv2 has a different seq_length
        UpperCAmelCase_ : List[Any] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        UpperCAmelCase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        UpperCAmelCase_ : Optional[int] = outputs.reshaped_hidden_states
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = reshaped_hidden_states[0].shape
        # Flatten (B, C, H, W) back to (B, H*W, C) for comparison.
        UpperCAmelCase_ : Dict = (
            reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)

    def a__ ( self ) -> List[Any]:
        """Hidden-state checks at the native image size (flag and config paths)."""
        UpperCAmelCase_, UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : int = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Tuple = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : Optional[Any] = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> List[Any]:
        """Hidden-state checks with the input padded up to a patch multiple."""
        UpperCAmelCase_, UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[Any] = 3
        UpperCAmelCase_ : List[str] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        UpperCAmelCase_ : Optional[Any] = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        UpperCAmelCase_ : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        UpperCAmelCase_ : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Dict = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,(padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : Tuple = True
            self.check_hidden_states_output(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,(padded_height, padded_width) )

    def a__ ( self ) -> List[Any]:
        """Run the masked-image-modeling head check."""
        UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> Optional[int]:
        """Run the image-classification head check."""
        UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )

    @slow
    def a__ ( self ) -> List[str]:
        """Smoke-test loading the first published checkpoint."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : List[Any] = SwinvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )

    def a__ ( self ) -> List[str]:
        """With zero-init config, non-embedding params must init to exactly 0 or 1."""
        UpperCAmelCase_, UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[str] = _config_zero_init(_SCREAMING_SNAKE_CASE )
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = model_class(config=_SCREAMING_SNAKE_CASE )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class __a( unittest.TestCase ):
    """Slow integration test: run the tiny Swin v2 checkpoint on a fixture image
    and compare the first classification logits against recorded values.

    NOTE(review): identifiers were mangled — locals (`model`, `image`,
    `inputs`, `outputs`, ...) are read but assigned to `UpperCAmelCase_`.
    """

    @cached_property
    def a__ ( self ) -> List[Any]:
        """Return the checkpoint's image processor, or None without vision deps."""
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
            if is_vision_available()
            else None
        )

    @slow
    def a__ ( self ) -> Optional[Any]:
        """Classify the COCO cats fixture and check shape plus first 3 logits."""
        UpperCAmelCase_ : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            _SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[str] = self.default_image_processor
        UpperCAmelCase_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        UpperCAmelCase_ : Any = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        UpperCAmelCase_ : Optional[Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Union[str, Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
# Module-level logger and checkpoint->config-URL map.
# NOTE(review): both bindings use the same mangled name `__a`, so the map
# shadows the logger — restore distinct names (e.g. `logger`, config map).
__a = logging.get_logger(__name__)
__a = {
    'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class __a( _a ):
    """Configuration class for LayoutLMv3 (text + layout + image model).

    NOTE(review): this definition was auto-mangled and cannot compile as
    written — every __init__ parameter is `_SCREAMING_SNAKE_CASE` (duplicate
    argument names are a SyntaxError), and the body reads the original
    parameter names (`max_ad_position_embeddings`, `coordinate_size`, ...)
    that no longer exist. The base class alias `_a` is also undefined.
    Restore the original signature before use.
    """

    # model_type identifier used by AutoConfig dispatch.
    lowerCAmelCase = '''layoutlmv3'''

    def __init__( self ,_SCREAMING_SNAKE_CASE=50_265 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=224 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Dict:
        # Delegate shared text-model hyper-parameters to the base config.
        super().__init__(
            vocab_size=_SCREAMING_SNAKE_CASE ,hidden_size=_SCREAMING_SNAKE_CASE ,num_hidden_layers=_SCREAMING_SNAKE_CASE ,num_attention_heads=_SCREAMING_SNAKE_CASE ,intermediate_size=_SCREAMING_SNAKE_CASE ,hidden_act=_SCREAMING_SNAKE_CASE ,hidden_dropout_prob=_SCREAMING_SNAKE_CASE ,attention_probs_dropout_prob=_SCREAMING_SNAKE_CASE ,max_position_embeddings=_SCREAMING_SNAKE_CASE ,type_vocab_size=_SCREAMING_SNAKE_CASE ,initializer_range=_SCREAMING_SNAKE_CASE ,layer_norm_eps=_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
        # Layout/visual-specific fields.
        UpperCAmelCase_ : Dict = max_ad_position_embeddings
        UpperCAmelCase_ : Any = coordinate_size
        UpperCAmelCase_ : Tuple = shape_size
        UpperCAmelCase_ : Optional[int] = has_relative_attention_bias
        UpperCAmelCase_ : Union[str, Any] = rel_pos_bins
        UpperCAmelCase_ : Dict = max_rel_pos
        UpperCAmelCase_ : Union[str, Any] = has_spatial_attention_bias
        UpperCAmelCase_ : Any = rel_ad_pos_bins
        UpperCAmelCase_ : Tuple = max_rel_ad_pos
        UpperCAmelCase_ : List[str] = text_embed
        UpperCAmelCase_ : int = visual_embed
        UpperCAmelCase_ : int = input_size
        UpperCAmelCase_ : Dict = num_channels
        UpperCAmelCase_ : int = patch_size
        UpperCAmelCase_ : Dict = classifier_dropout
class __a( _a ):
    """ONNX export configuration for LayoutLMv3.

    NOTE(review): identifiers were auto-mangled — the base alias `_a` is
    undefined, all methods are named `a__`, and `generate_dummy_inputs`
    has duplicate `_SCREAMING_SNAKE_CASE` parameters (a SyntaxError) whose
    original names (`processor`, `batch_size`, `seq_length`, ...) are read
    in the body. Restore the original signature before use.
    """

    # Minimum ONNX opset / torch version required for export.
    lowerCAmelCase = version.parse('''1.12''' )

    @property
    def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Declare input names and their dynamic axes for the ONNX graph."""
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
                    ('''bbox''', {0: '''batch''', 1: '''sequence'''}),
                    ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ] )
        else:
            return OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                    ('''bbox''', {0: '''batch''', 1: '''sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
                    ('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
                ] )

    @property
    def a__ ( self ) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1e-5

    @property
    def a__ ( self ) -> int:
        """Default ONNX opset to export with."""
        return 12

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 40 ,_SCREAMING_SNAKE_CASE = 40 ,) -> Mapping[str, Any]:
        """Build dummy (text, bbox, image) inputs for tracing the export."""
        setattr(processor.image_processor ,'''apply_ocr''' ,_SCREAMING_SNAKE_CASE )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        UpperCAmelCase_ : List[str] = compute_effective_axis_dimension(
            _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCAmelCase_ : Optional[Any] = processor.tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : int = compute_effective_axis_dimension(
            _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_SCREAMING_SNAKE_CASE )
        # Generate dummy inputs according to compute batch and sequence
        UpperCAmelCase_ : Optional[Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        UpperCAmelCase_ : Tuple = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        UpperCAmelCase_ : Union[str, Any] = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : int = dict(
            processor(
                _SCREAMING_SNAKE_CASE ,text=_SCREAMING_SNAKE_CASE ,boxes=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,) )
        return inputs
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
A__ : Optional[int] = ["text", "image", "audio"]
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> List[Any]:
__lowerCamelCase : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
inputs.append(create_inputs(__lowerCAmelCase ) )
else:
raise ValueError(F'Invalid type requested: {input_type}' )
return inputs
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> List[str]:
    """Map each output value to its modality name ("text"/"image"/"audio").

    Args:
        UpperCAmelCase_: iterable of tool outputs (str/AgentText, PIL
            image/AgentImage, or torch tensor/AgentAudio).

    Returns:
        A list of modality names, one per output.

    Raises:
        ValueError: for an output of an unrecognized type.
    """
    # BUG FIX: the accumulator was bound to `__lowerCamelCase` but appends and
    # the return read the undefined `output_types`; the loop also iterated the
    # undefined `outputs` and tested isinstance against the undefined
    # `__lowerCAmelCase` instead of each output value.
    output_types = []
    for output in UpperCAmelCase_:
        if isinstance(output , (str, AgentText) ):
            output_types.append('text' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('image' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append('audio' )
        else:
            raise ValueError(F'Invalid output: {output}' )
    return output_types
@is_tool_test
class UpperCAmelCase_ :
    """Mixin of common checks for agent tools (`self.tool` supplied by subclass).

    NOTE(review): identifiers were auto-mangled — every method is named
    `lowercase_` (later defs shadow earlier ones), isinstance checks use the
    undefined alias `_a`, and several locals (`inputs`, `outputs`,
    `authorized_types`) are read but assigned to `__lowerCamelCase`.
    Restore original identifiers before running.
    """

    def lowercase_ ( self ) -> str:
        """The tool's declared inputs/outputs must use only authorized modalities."""
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )
        __lowerCamelCase : List[Any] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , _a ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        __lowerCamelCase : Dict = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def lowercase_ ( self ) -> List[Any]:
        """Calling the tool on dummy inputs yields outputs of the declared types."""
        __lowerCamelCase : Tuple = create_inputs(self.tool.inputs )
        __lowerCamelCase : Optional[Any] = self.tool(*_a )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            __lowerCamelCase : List[Any] = [outputs]
        self.assertListEqual(output_types(_a ) , self.tool.outputs )

    def lowercase_ ( self ) -> List[Any]:
        """The tool must expose a description and a default checkpoint."""
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )

    def lowercase_ ( self ) -> Union[str, Any]:
        """Outputs must be instances of the mapped agent types."""
        __lowerCamelCase : Dict = create_inputs(self.tool.inputs )
        __lowerCamelCase : Dict = self.tool(*_a )
        if not isinstance(_a , _a ):
            __lowerCamelCase : Union[str, Any] = [outputs]
        self.assertEqual(len(_a ) , len(self.tool.outputs ) )
        for output, output_type in zip(_a , self.tool.outputs ):
            __lowerCamelCase : Optional[Any] = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(_a , _a ) )

    def lowercase_ ( self ) -> Optional[int]:
        """The tool must also accept inputs pre-wrapped in agent types."""
        __lowerCamelCase : Dict = create_inputs(self.tool.inputs )
        __lowerCamelCase : List[Any] = []
        for _input, input_type in zip(_a , self.tool.inputs ):
            if isinstance(_a , _a ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        __lowerCamelCase : Union[str, Any] = self.tool(*_a )
        if not isinstance(_a , _a ):
            __lowerCamelCase : Optional[Any] = [outputs]
        self.assertEqual(len(_a ) , len(self.tool.outputs ) )
| 185 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# Ordered (tf_substring, hf_substring) substitution pairs used to translate
# TF Pegasus state-dict keys into Hugging Face naming.
# NOTE(review): the rename helper below should iterate this constant — confirm
# the name matches after the automatic renaming (it was originally PATTERNS).
a :List[Any] = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]
def _lowercase ( __lowerCAmelCase ) -> List[str]:
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = k.replace(__lowerCAmelCase , __lowerCAmelCase )
return k
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> PegasusForConditionalGeneration:
    """Convert a dict of TF Pegasus weights into a PegasusForConditionalGeneration.

    NOTE(review): this definition was auto-mangled and cannot run as written —
    the two parameters share one name (duplicate argument = SyntaxError),
    results are bound to `SCREAMING_SNAKE_CASE__` while the body reads the
    original locals (`cfg_kwargs`, `cfg`, `torch_model`, `sd`, `mapping`,
    `missing`, `extra`, ...), and `rename_state_dict_key` is the helper
    renamed to `_lowercase` above. Restore original identifiers before use.
    """
    SCREAMING_SNAKE_CASE__ : str = DEFAULTS.copy()
    cfg_kwargs.update(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : List[str] = PegasusConfig(**__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusForConditionalGeneration(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : str = torch_model.model.state_dict()
    SCREAMING_SNAKE_CASE__ : Any = {}
    for k, v in tf_weights.items():
        SCREAMING_SNAKE_CASE__ : Optional[int] = rename_state_dict_key(__lowerCAmelCase )
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        # Dense/projection kernels are stored transposed in TF checkpoints.
        if "dense" in k or "proj" in new_k:
            SCREAMING_SNAKE_CASE__ : Tuple = v.T
        SCREAMING_SNAKE_CASE__ : Any = torch.tensor(__lowerCAmelCase , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    SCREAMING_SNAKE_CASE__ : Optional[int] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    SCREAMING_SNAKE_CASE__ : Optional[int] = mapping["""shared.weight"""]
    SCREAMING_SNAKE_CASE__ : Any = mapping["""shared.weight"""]
    # Zero-fill every bias the TF checkpoint does not provide.
    SCREAMING_SNAKE_CASE__ : int = {k: torch.zeros_like(__lowerCAmelCase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = torch_model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
    # Positional embeddings are static (sinusoidal) and expected to be missing.
    SCREAMING_SNAKE_CASE__ : Tuple = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def _lowercase ( __lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    """Load all non-optimizer variables from a TF checkpoint into a name->array dict.

    NOTE(review): locals were mangled — the body reads `tf_weights`,
    `ignore_name` and `skip_key`, which are bound to `SCREAMING_SNAKE_CASE__`.
    """
    SCREAMING_SNAKE_CASE__ : List[Any] = tf.train.list_variables(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : Dict = {}
    # Optimizer slots and the step counter are not model weights.
    SCREAMING_SNAKE_CASE__ : Any = ["""Adafactor""", """global_step"""]
    for name, shape in tqdm(__lowerCAmelCase , desc="""converting tf checkpoint to dict""" ):
        SCREAMING_SNAKE_CASE__ : Tuple = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        SCREAMING_SNAKE_CASE__ : str = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE__ : Dict = array
    return tf_weights
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
    """Convert a TF Pegasus checkpoint directory into a saved HF model + tokenizer.

    NOTE(review): cannot run as written — duplicate parameter names are a
    SyntaxError, and the body reads `dataset`, `tok`,
    `desired_max_model_length`, `tf_weights`, `cfg_updates`, `torch_model`
    and `sd`, all of which were re-bound to `SCREAMING_SNAKE_CASE__`.
    """
    # save tokenizer first
    SCREAMING_SNAKE_CASE__ : Any = Path(__lowerCAmelCase ).parent.name
    SCREAMING_SNAKE_CASE__ : Dict = task_specific_params[F'''summarization_{dataset}''']["""max_position_embeddings"""]
    SCREAMING_SNAKE_CASE__ : Tuple = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowerCAmelCase )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(__lowerCAmelCase )
    # convert model
    SCREAMING_SNAKE_CASE__ : Optional[Any] = get_tf_weights_as_numpy(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : Any = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        SCREAMING_SNAKE_CASE__ : Tuple = task_specific_params
    SCREAMING_SNAKE_CASE__ : str = convert_pegasus(__lowerCAmelCase , __lowerCAmelCase )
    torch_model.save_pretrained(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE__ : Tuple = torch_model.state_dict()
    # Static sinusoidal position embeddings are regenerated at load time.
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(__lowerCAmelCase , Path(__lowerCAmelCase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
    # CLI: convert a TF Pegasus checkpoint; save_dir defaults to pegasus/<dataset>.
    # NOTE(review): locals `args`/`dataset` are read below but were re-bound
    # to the mangled name `a` — restore original names before running.
    a :List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    a :Optional[Any] = parser.parse_args()
    if args.save_dir is None:
        a :List[Any] = Path(args.tf_ckpt_path).parent.name
        a :Optional[Any] = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 132 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def A__ ( self ) -> List[Any]:
        '''PyTorch text-generation pipeline: deterministic outputs of the tiny
        sshleifer/tiny-ctrl model for single/batched prompts, tensor returns,
        and batched generation with a pad token.

        NOTE(review): identifiers were mangled — `do_sample=UpperCAmelCase`
        and friends pass the undefined name `UpperCAmelCase` where literal
        flags/values belong; restore the original literals before running.
        '''
        lowercase_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
        # Using `do_sample=False` to force deterministic output
        lowercase_ = text_generator("This is a test" , do_sample=UpperCAmelCase )
        self.assertEqual(
            UpperCAmelCase , [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ] , )
        lowercase_ = text_generator(["This is a test", "This is a second test"] )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ] , )
        # return_tensors path yields generated token ids instead of text.
        lowercase_ = text_generator("This is a test" , do_sample=UpperCAmelCase , num_return_sequences=2 , return_tensors=UpperCAmelCase )
        self.assertEqual(
            UpperCAmelCase , [
                {"generated_token_ids": ANY(UpperCAmelCase )},
                {"generated_token_ids": ANY(UpperCAmelCase )},
            ] , )
        # Batched generation requires a pad token; reuse EOS as pad.
        lowercase_ = text_generator.model.config.eos_token_id
        lowercase_ = "<pad>"
        lowercase_ = text_generator(
            ["This is a test", "This is a second test"] , do_sample=UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCAmelCase , )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {"generated_token_ids": ANY(UpperCAmelCase )},
                    {"generated_token_ids": ANY(UpperCAmelCase )},
                ],
                [
                    {"generated_token_ids": ANY(UpperCAmelCase )},
                    {"generated_token_ids": ANY(UpperCAmelCase )},
                ],
            ] , )
@require_tf
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
lowercase_ = text_generator("This is a test" , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
lowercase_ = text_generator(["This is a test", "This is a second test"] , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = TextGenerationPipeline(model=UpperCAmelCase , tokenizer=UpperCAmelCase )
return text_generator, ["This is a test", "Another test"]
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = "Hello I believe in"
lowercase_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
lowercase_ = text_generator(UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
lowercase_ = text_generator(UpperCAmelCase , stop_sequence=" fe" )
self.assertEqual(UpperCAmelCase , [{"generated_text": "Hello I believe in fe"}] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = text_generator.model
lowercase_ = text_generator.tokenizer
lowercase_ = text_generator("This is a test" )
self.assertEqual(UpperCAmelCase , [{"generated_text": ANY(UpperCAmelCase )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
lowercase_ = text_generator("This is a test" , return_full_text=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [{"generated_text": ANY(UpperCAmelCase )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
lowercase_ = pipeline(task="text-generation" , model=UpperCAmelCase , tokenizer=UpperCAmelCase , return_full_text=UpperCAmelCase )
lowercase_ = text_generator("This is a test" )
self.assertEqual(UpperCAmelCase , [{"generated_text": ANY(UpperCAmelCase )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
lowercase_ = text_generator("This is a test" , return_full_text=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [{"generated_text": ANY(UpperCAmelCase )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
lowercase_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[{"generated_text": ANY(UpperCAmelCase )}, {"generated_text": ANY(UpperCAmelCase )}],
[{"generated_text": ANY(UpperCAmelCase )}, {"generated_text": ANY(UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowercase_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[{"generated_text": ANY(UpperCAmelCase )}, {"generated_text": ANY(UpperCAmelCase )}],
[{"generated_text": ANY(UpperCAmelCase )}, {"generated_text": ANY(UpperCAmelCase )}],
] , )
with self.assertRaises(UpperCAmelCase ):
lowercase_ = text_generator("test" , return_full_text=UpperCAmelCase , return_text=UpperCAmelCase )
with self.assertRaises(UpperCAmelCase ):
lowercase_ = text_generator("test" , return_full_text=UpperCAmelCase , return_tensors=UpperCAmelCase )
with self.assertRaises(UpperCAmelCase ):
lowercase_ = text_generator("test" , return_text=UpperCAmelCase , return_tensors=UpperCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowercase_ = text_generator("" )
self.assertEqual(UpperCAmelCase , [{"generated_text": ANY(UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowercase_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowercase_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
lowercase_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(UpperCAmelCase ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def A__ ( self ) -> Tuple:
'''simple docstring'''
import torch
# Classic `model_kwargs`
lowercase_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowercase_ = pipe("This is a test" )
self.assertEqual(
UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowercase_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowercase_ = pipe("This is a test" )
self.assertEqual(
UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowercase_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowercase_ = pipe("This is a test" )
self.assertEqual(
UpperCAmelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
lowercase_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def A__ ( self ) -> Tuple:
'''simple docstring'''
import torch
lowercase_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=UpperCAmelCase , top_p=0.5 )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = "Hello world"
lowercase_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
lowercase_ = logging.get_logger("transformers.generation.tf_utils" )
else:
lowercase_ = logging.get_logger("transformers.generation.utils" )
lowercase_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(UpperCAmelCase ) as cl:
lowercase_ = text_generator(UpperCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(UpperCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(UpperCAmelCase ) as cl:
lowercase_ = text_generator(UpperCAmelCase , max_new_tokens=1 )
self.assertNotIn(UpperCAmelCase , cl.out )
with CaptureLogger(UpperCAmelCase ) as cl:
lowercase_ = text_generator(UpperCAmelCase , max_length=10 )
self.assertNotIn(UpperCAmelCase , cl.out )
| 368 |
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
    """Return True iff ``__lowerCamelCase`` is a perfect number.

    A perfect number equals the sum of its proper divisors (e.g. 6 = 1+2+3).
    Fix: the previous revision referenced the undefined name ``number``
    instead of the actual parameter, raising ``NameError`` on every call.
    """
    # Proper divisors can be at most number // 2, so only scan that far.
    divisor_sum = sum(i for i in range(1, __lowerCamelCase // 2 + 1) if __lowerCamelCase % i == 0)
    return divisor_sum == __lowerCamelCase
if __name__ == "__main__":
    # Interactive driver: read a number and report whether it is perfect.
    # Fix: the previous revision printed via the undefined names ``perfect``
    # and ``number``; use the mangled-but-defined function/variable names.
    print("""Program to check whether a number is a Perfect number or not...""")
    SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
    print(f"""{SCREAMING_SNAKE_CASE__} is {'' if SCREAMING_SNAKE_CASE_(SCREAMING_SNAKE_CASE__) else 'not '}a Perfect Number.""")
| 297 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements bind the same mangled name ``A`` — the
# archive map overwrites the logger (originally presumably ``logger`` and
# ``EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP``); confirm intended names.
A: Tuple = logging.get_logger(__name__)
# Map from pretrained checkpoint id to its hosted config.json URL.
A: Tuple = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    r"""Configuration for an EfficientFormer-style vision model.

    Holds the architecture hyper-parameters (stage depths, hidden sizes,
    attention geometry, downsampling and regularisation settings) and
    forwards any extra keyword arguments to ``PretrainedConfig``.

    Fixes over the previous revision: the base class referenced an undefined
    name (now the imported ``PretrainedConfig``), and every ``__init__``
    parameter shared a single mangled name — an illegal duplicate-argument
    signature — so parameter names were restored from the attribute
    assignments; the default values are unchanged.

    Note: the list defaults are shared between calls and must be treated as
    read-only by callers.
    """

    # Presumably the ``model_type`` registry key — kept under its mangled
    # name to avoid widening this fix; confirm before renaming.
    __lowerCAmelCase : Optional[int] = 'efficientformer'

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1E-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        image_size: int = 224,
        batch_norm_eps: float = 1E-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 109 |
"""simple docstring"""
# Lookup table mapping a base-16 digit value (0-15) to its character.
# NOTE(review): module-level names in this file appear machine-mangled —
# verify that consumers really reference this table as ``A``.
A: Union[str, Any] = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    1_0: "a",
    1_1: "b",
    1_2: "c",
    1_3: "d",
    1_4: "e",
    1_5: "f",
}
def _snake_case ( UpperCamelCase : float ):
assert type(UpperCamelCase ) in (int, float) and decimal == int(UpperCamelCase )
UpperCAmelCase : str = int(UpperCamelCase )
UpperCAmelCase : Optional[int] = """"""
UpperCAmelCase : List[str] = False
if decimal < 0:
UpperCAmelCase : Any = True
decimal *= -1
while decimal > 0:
UpperCAmelCase , UpperCAmelCase : Dict = divmod(UpperCamelCase , 16 )
UpperCAmelCase : Union[str, Any] = values[remainder] + hexadecimal
UpperCAmelCase : int = """0x""" + hexadecimal
if negative:
UpperCAmelCase : Optional[int] = """-""" + hexadecimal
return hexadecimal
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 109 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
# Emit full debug logging while the example-script tests run.
logging.basicConfig(level=logging.DEBUG)
# Root logger; a stdout stream handler is attached further down in this file.
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger()
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCamelCase__ : Tuple =argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCamelCase__ : Tuple =parser.parse_args()
return args.f
def _lowerCAmelCase ( UpperCAmelCase : str ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] ={}
UpperCamelCase__ : Optional[int] =os.path.join(UpperCAmelCase , '''all_results.json''' )
if os.path.exists(UpperCAmelCase ):
with open(UpperCAmelCase , '''r''' ) as f:
UpperCamelCase__ : List[Any] =json.load(UpperCAmelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def _lowerCAmelCase ( ):
    """Return True when the tests run on CUDA *and* NVIDIA apex is importable.

    Fix: the previous revision stored the CUDA check in a throwaway name and
    then returned via the undefined name ``is_using_cuda``.
    """
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
# Mirror all logger output to stdout so the test harness captures it.
# Fix: the handler was previously registered via the undefined name
# ``stream_handler``; pass the variable it was actually bound to.
_SCREAMING_SNAKE_CASE : int = logging.StreamHandler(sys.stdout)
logger.addHandler(_SCREAMING_SNAKE_CASE)
class __a ( snake_case__ ):
    """End-to-end smoke tests for the PyTorch ``*_no_trainer`` example scripts.

    Each test launches an example via ``accelerate launch`` and asserts on
    the metrics the script writes to ``all_results.json``.

    NOTE(review): identifiers here were machine-mangled. Values are assigned
    to the throwaway name ``UpperCamelCase__`` and then read back under the
    original names (``cls.tmpdir``, ``cls.configPath``, ``tmp_dir``,
    ``testargs``, ``result``, ``self._launch_args``, ``epochs``), which are
    undefined at runtime, and the base class ``snake_case__`` does not exist
    (presumably ``TestCasePlus``, imported above). Code reproduced
    unchanged; comments only.
    """
    @classmethod
    def _lowerCAmelCase ( cls : Any ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        UpperCamelCase__ : Optional[int] =tempfile.mkdtemp()
        UpperCamelCase__ : int =os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        UpperCamelCase__ : int =['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
    @classmethod
    def _lowerCAmelCase ( cls : Dict ):
        # Remove the temporary config directory created above.
        shutil.rmtree(cls.tmpdir )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : Tuple ):
        # run_glue_no_trainer: text classification on a tiny MRPC sample.
        UpperCamelCase__ : Optional[int] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : Tuple =f'''
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''' )
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : Optional[Any] =get_results(lowercase_ )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''glue_no_trainer''' ) ) )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : List[Any] ):
        # run_clm_no_trainer: causal language modeling on a tiny corpus.
        UpperCamelCase__ : List[str] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : Any =f'''
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : Optional[int] =get_results(lowercase_ )
        self.assertLess(result['''perplexity'''] , 100 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''clm_no_trainer''' ) ) )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : Any ):
        # run_mlm_no_trainer: masked language modeling on a tiny corpus.
        UpperCamelCase__ : Dict =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : Optional[Any] =f'''
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : str =get_results(lowercase_ )
        self.assertLess(result['''perplexity'''] , 42 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''mlm_no_trainer''' ) ) )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : Dict ):
        # run_ner_no_trainer: token classification on a tiny CoNLL sample.
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        UpperCamelCase__ : Optional[Any] =7 if get_gpu_count() > 1 else 2
        UpperCamelCase__ : Dict =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : str =f'''
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : str =get_results(lowercase_ )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
        self.assertLess(result['''train_loss'''] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''ner_no_trainer''' ) ) )
    @unittest.skip(reason='''Fix me @muellerzr''' )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : Union[str, Any] ):
        # run_qa_no_trainer: SQuAD v2-style question answering.
        UpperCamelCase__ : Optional[Any] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : Optional[Any] =f'''
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : List[Any] =get_results(lowercase_ )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['''eval_f1'''] , 28 )
        self.assertGreaterEqual(result['''eval_exact'''] , 28 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''qa_no_trainer''' ) ) )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : str ):
        # run_swag_no_trainer: multiple choice on a tiny SWAG sample.
        UpperCamelCase__ : Union[str, Any] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : int =f'''
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : Optional[int] =get_results(lowercase_ )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''swag_no_trainer''' ) ) )
    @slow
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : Optional[int] ):
        # run_summarization_no_trainer: t5-small on a tiny XSum sample.
        UpperCamelCase__ : Optional[int] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : int =f'''
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : Optional[int] =get_results(lowercase_ )
        self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
        self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
        self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
        self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''summarization_no_trainer''' ) ) )
    @slow
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : List[Any] ):
        # run_translation_no_trainer: en->ro on a tiny WMT16 sample.
        UpperCamelCase__ : Union[str, Any] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : Any =f'''
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : Tuple =get_results(lowercase_ )
        self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''translation_no_trainer''' ) ) )
    @slow
    def _lowerCAmelCase ( self : Tuple ):
        # run_semantic_segmentation_no_trainer on a tiny sample dataset.
        UpperCamelCase__ : Any =logging.StreamHandler(sys.stdout )
        logger.addHandler(lowercase_ )
        UpperCamelCase__ : List[str] =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : Tuple =f'''
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            '''.split()
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : str =get_results(lowercase_ )
        self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0 )
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def _lowerCAmelCase ( self : List[Any] ):
        # run_image_classification_no_trainer: ViT on a cats-vs-dogs sample.
        UpperCamelCase__ : str =self.get_auto_remove_tmp_dir()
        UpperCamelCase__ : str =f'''
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''' )
        run_command(self._launch_args + testargs )
        UpperCamelCase__ : int =get_results(lowercase_ )
        # The base model scores a 25%
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''step_1''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''image_classification_no_trainer''' ) ) )
| 157 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger. NOTE(review): code below refers to it as ``logger``,
# which no longer matches this mangled name — confirm the intended name.
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
@dataclass(frozen=snake_case__ )
class __a :
    """A single raw input example (presumably the original ``InputExample``).

    NOTE(review): the field declarations were mangled — all five collapsed
    onto the single name ``SCREAMING_SNAKE_CASE_`` (so only the last binding
    survives) and ``42`` replaced the real type annotations; ``snake_case__``
    (presumably ``True``) is undefined. Code reproduced unchanged.
    """
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
@dataclass(frozen=snake_case__ )
class __a :
    """Tokenized model features (presumably the original ``InputFeatures``).

    NOTE(review): the field declarations were mangled — all five collapsed
    onto the single name ``SCREAMING_SNAKE_CASE_`` and ``42`` replaced the
    real type annotations; ``snake_case__`` (presumably ``True``) is
    undefined. Code reproduced unchanged.
    """
    SCREAMING_SNAKE_CASE_ = 42
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __a ( snake_case__ ):
    """PyTorch dataset over cached HANS features.

    NOTE(review): identifiers were machine-mangled. Every ``__init__``
    parameter shares the name ``lowercase_`` — a duplicate-argument
    SyntaxError — and locals are assigned to throwaway names then read back
    under undefined ones (``task``, ``evaluate``, ``tokenizer``,
    ``processor``, ``label_list``, ``cached_features_file``,
    ``overwrite_cache``, ``data_dir``, ``i``); the base class
    ``snake_case__`` does not exist (presumably ``Dataset``). Code
    reproduced unchanged; comments only.
    """
    SCREAMING_SNAKE_CASE_ = 42
    def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : Optional[int]=False , lowercase_ : bool = False , ):
        UpperCamelCase__ : Tuple =hans_processors[task]()
        # Cache file name encodes split, tokenizer class, max length and task.
        UpperCamelCase__ : Union[str, Any] =os.path.join(
            lowercase_ , '''cached_{}_{}_{}_{}'''.format(
                '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
        UpperCamelCase__ : int =processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =label_list[2], label_list[1]
        UpperCamelCase__ : List[Any] =label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCamelCase__ : Any =cached_features_file + '''.lock'''
        with FileLock(lowercase_ ):
            if os.path.exists(lowercase_ ) and not overwrite_cache:
                logger.info(f'''Loading features from cached file {cached_features_file}''' )
                UpperCamelCase__ : Optional[int] =torch.load(lowercase_ )
            else:
                logger.info(f'''Creating features from dataset file at {data_dir}''' )
                UpperCamelCase__ : str =(
                    processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
                )
                logger.info('''Training examples: %s''' , len(lowercase_ ) )
                UpperCamelCase__ : Tuple =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
                logger.info('''Saving features into cached file %s''' , lowercase_ )
                torch.save(self.features , lowercase_ )
    def __len__( self : Union[str, Any] ):
        # Number of featurized examples.
        return len(self.features )
    def __getitem__( self : Optional[int] , lowercase_ : Optional[Any] ):
        # NOTE(review): indexes with ``i`` rather than the parameter.
        return self.features[i]
    def _lowerCAmelCase ( self : int ):
        # Accessor for the (possibly swapped) label list.
        return self.label_list
if is_tf_available():
import tensorflow as tf
class __a :
    """TensorFlow dataset over HANS features, built via ``tf.data``.

    NOTE(review): identifiers were machine-mangled exactly as in the torch
    variant above: duplicate ``lowercase_`` parameters in ``__init__`` (a
    SyntaxError), results bound to throwaway names and read back under
    undefined ones (``task``, ``processor``, ``label_list``, ``evaluate``,
    ``tokenizer``, ``i``), and ``tf.data.Dataset.from_generator`` is passed
    ``lowercase_`` instead of the local ``gen``. Code reproduced unchanged;
    comments only.
    """
    SCREAMING_SNAKE_CASE_ = 42
    def __init__( self : Any , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : Union[str, Any]=False , lowercase_ : bool = False , ):
        UpperCamelCase__ : Any =hans_processors[task]()
        UpperCamelCase__ : Tuple =processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            UpperCamelCase__ , UpperCamelCase__ : Tuple =label_list[2], label_list[1]
        UpperCamelCase__ : Union[str, Any] =label_list
        UpperCamelCase__ : Any =processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
        UpperCamelCase__ : Union[str, Any] =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        def gen():
            # Generator feeding tf.data; yields (inputs dict, label) pairs.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
                if ex_index % 1_0000 == 0:
                    logger.info('''Writing example %d of %d''' % (ex_index, len(lowercase_ )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        UpperCamelCase__ : Optional[Any] =tf.data.Dataset.from_generator(
            lowercase_ , (
                {
                    '''example_id''': tf.intaa,
                    '''input_ids''': tf.intaa,
                    '''attention_mask''': tf.intaa,
                    '''token_type_ids''': tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    '''example_id''': tf.TensorShape([] ),
                    '''input_ids''': tf.TensorShape([None, None] ),
                    '''attention_mask''': tf.TensorShape([None, None] ),
                    '''token_type_ids''': tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )
    def _lowerCAmelCase ( self : Optional[Any] ):
        # Accessor for the built tf.data pipeline.
        return self.dataset
    def __len__( self : str ):
        return len(self.features )
    def __getitem__( self : List[str] , lowercase_ : Dict ):
        # NOTE(review): indexes with ``i`` rather than the parameter.
        return self.features[i]
    def _lowerCAmelCase ( self : Dict ):
        return self.label_list
class __a ( snake_case__ ):
    """Processor reading the HANS tsv files into examples.

    NOTE(review): all four methods share the mangled name ``_lowerCAmelCase``
    (presumably ``get_train_examples`` / ``get_dev_examples`` /
    ``get_labels`` / ``_create_examples``), so only the last definition
    survives on the class. The last method repeats the parameter name
    ``lowercase_`` — a SyntaxError — and its body reads undefined names
    (``set_type``, ``line``, ``examples``, ``InputExample``); the base class
    ``snake_case__`` does not exist (presumably ``DataProcessor``). Code
    reproduced unchanged; comments only.
    """
    def _lowerCAmelCase ( self : List[Any] , lowercase_ : Union[str, Any] ):
        # Training split.
        return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
    def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ):
        # Evaluation split.
        return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
    def _lowerCAmelCase ( self : List[Any] ):
        # Fixed MNLI-style label set.
        return ["contradiction", "entailment", "neutral"]
    def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
        UpperCamelCase__ : Tuple =[]
        for i, line in enumerate(lowercase_ ):
            if i == 0:
                continue
            UpperCamelCase__ : str ='''%s-%s''' % (set_type, line[0])
            UpperCamelCase__ : str =line[5]
            UpperCamelCase__ : Any =line[6]
            UpperCamelCase__ : Optional[int] =line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            UpperCamelCase__ : str =line[0]
            examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
        return examples
def _lowerCAmelCase ( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    """Convert HANS ``InputExample`` objects into ``InputFeatures``.

    Bug fix: the original signature declared four parameters with the same
    name (a SyntaxError) and passed that single name to every call site; the
    parameters are restored to their distinct roles.

    Args:
        examples: the input examples to featurize.
        label_list: task labels; mapped to contiguous ids.
        max_length: maximum (padded/truncated) sequence length.
        tokenizer: the tokenizer used to encode each text pair.
    """
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 10_000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        # Unseen labels fall back to id 0 (HANS dev only carries entailment/non-entailment).
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    # Log the first few examples for debugging.
    for i, example in enumerate(examples[:5] ):
        logger.info('''*** Example ***''' )
        logger.info(F'''guid: {example}''' )
        logger.info(F'''features: {features[i]}''' )
    return features
# Task name -> number of labels for the HANS evaluation task.
_SCREAMING_SNAKE_CASE : List[str] = {
    """hans""": 3,
}
# Task name -> processor class.
# NOTE(review): this rebinds `_SCREAMING_SNAKE_CASE`, discarding the dict above,
# and `HansProcessor` is unresolved here (the processor class in this file is
# named `__a` after automated renaming) — verify the intended names.
_SCREAMING_SNAKE_CASE : Tuple = {
    """hans""": HansProcessor,
}
| 157 | 1 |
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> bool:
    """Return True if the decimal representation of the value reads the same forwards and backwards."""
    digits = str(UpperCAmelCase )
    return digits == digits[::-1]
def UpperCAmelCase ( UpperCAmelCase ) -> int:
    """Return the value plus its digit-reversed counterpart (one reverse-and-add step)."""
    reversed_digits = int(str(UpperCAmelCase )[::-1] )
    return int(UpperCAmelCase ) + reversed_digits
def UpperCAmelCase ( UpperCAmelCase = 10000 ) -> int:
    """Count Lychrel candidates below the given limit (Project Euler 55).

    A number counts as Lychrel when 50 reverse-and-add steps never reach a
    palindrome.  Bug fix: the original body called ``sum_reverse`` and
    ``is_palindrome`` — names that do not exist in this module after automated
    renaming — so local helpers are used instead.
    """

    def _is_palindrome(value: int) -> bool:
        text = str(value)
        return text == text[::-1]

    def _reverse_and_add(value: int) -> int:
        return value + int(str(value)[::-1])

    lychrel_nums = []
    for num in range(1, UpperCAmelCase):
        candidate = num
        # Apply at most 50 reverse-and-add iterations.
        for _ in range(50):
            candidate = _reverse_and_add(candidate)
            if _is_palindrome(candidate):
                break
        else:
            # No palindrome reached within 50 steps: Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under that name in this file (the
    # Lychrel counter above is `UpperCAmelCase` after automated renaming) —
    # verify before running as a script.
    print(F"""{solution() = }""")
"""simple docstring"""
def UpperCAmelCase ( graph , next_ver , curr_ind , path ) -> bool:
    """Return True if *next_ver* can extend *path* at position *curr_ind*.

    Bug fix: the original declared four parameters with the same name (a
    SyntaxError); they are restored to (graph, next_ver, curr_ind, path).
    """
    # 1. An edge must exist between the current and the candidate vertex.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. The candidate vertex must not already be in the path.
    return not any(vertex == next_ver for vertex in path )
def UpperCAmelCase ( graph , path , curr_ind ) -> bool:
    """Backtracking helper: try to complete a Hamiltonian cycle in *path* from *curr_ind*.

    Mutates *path* in place and returns True once a cycle is found.  Bug fix:
    the original called ``valid_connection`` and recursed via
    ``util_hamilton_cycle`` — names not bound in this module after automated
    renaming — so the search is implemented with local helpers instead.
    """

    def _valid(next_ver: int, ci: int) -> bool:
        # Edge must exist and the vertex must be unused.
        if graph[path[ci - 1]][next_ver] == 0:
            return False
        return not any(vertex == next_ver for vertex in path)

    def _search(ci: int) -> bool:
        # Base case: all vertices placed; close the cycle back to the start.
        if ci == len(graph):
            return graph[path[ci - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            if _valid(next_ver, ci):
                path[ci] = next_ver
                if _search(ci + 1):
                    return True
                path[ci] = -1  # backtrack
        return False

    return _search(curr_ind)
def UpperCAmelCase ( graph , start_index = 0 ) -> list[int]:
    """Return a Hamiltonian cycle as a vertex list (start == end), or [] if none exists.

    Bug fix: the original declared duplicate parameter names and called
    ``util_hamilton_cycle``, a name not bound in this module after automated
    renaming; the backtracking search is implemented locally instead.
    """
    path = [-1] * (len(graph ) + 1)
    # The cycle starts and ends at the chosen start vertex.
    path[0] = path[-1] = start_index

    def _valid(next_ver: int, ci: int) -> bool:
        if graph[path[ci - 1]][next_ver] == 0:
            return False
        return not any(vertex == next_ver for vertex in path)

    def _search(ci: int) -> bool:
        if ci == len(graph):
            return graph[path[ci - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            if _valid(next_ver, ci):
                path[ci] = next_ver
                if _search(ci + 1):
                    return True
                path[ci] = -1  # backtrack
        return False

    return path if _search(1 ) else []
| 69 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str ) -> int:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Tuple=True ) -> List[Any]:
model.train()
__lowerCAmelCase : str = model(__lowerCAmelCase )
__lowerCAmelCase : Optional[int] = F.mse_loss(__lowerCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( accelerator , sched=False ):
    """Create a plain model and an accelerator-prepared copy for sync tests.

    Returns ``(model, ddp_model, dataloader)``, or the 7-tuple
    ``(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)`` when
    *sched* is truthy.  Bug fix: the original declared duplicate parameter
    names (a SyntaxError); the parameter list is restored to match the body.
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        lr_sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
        # Prepare the DDP side together with its optimizer and scheduler.
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
        return (model, opt, lr_sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[Any] ) -> Tuple:
    """Check that ``Accelerator.no_sync`` is a no-op on single-device setups.

    NOTE(review): automated renaming collapsed distinct variables into
    ``__lowerCAmelCase`` and the accelerator parameter into
    ``SCREAMING_SNAKE_CASE``, so the body raises NameError as written
    (``accelerator``, ``model``, ``ddp_input`` are unbound) — restore the
    original accelerate ``test_sync.py`` names before running.
    """
    # Test when on a single CPU or GPU that the context manager does nothing
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = get_training_setup(__lowerCAmelCase )
    # Use a single batch
    __lowerCAmelCase , __lowerCAmelCase : List[Any] = next(iter(__lowerCAmelCase ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        __lowerCAmelCase , __lowerCAmelCase : str = accelerator.gather((ddp_input, ddp_target) )
        __lowerCAmelCase , __lowerCAmelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(__lowerCAmelCase ):
                step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else:
            # Sync grads
            step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        __lowerCAmelCase : Union[str, Any] = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[int] ) -> Any:
    """Check ``no_sync`` on a distributed setup: grads desync inside the
    context manager and resync outside it.

    NOTE(review): automated renaming collapsed distinct variables into
    ``__lowerCAmelCase``, so the body raises NameError as written — restore
    the original accelerate ``test_sync.py`` names before running.
    """
    # Test on distributed setup that context manager behaves properly
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = get_training_setup(__lowerCAmelCase )
    # Use a single batch
    __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = next(iter(__lowerCAmelCase ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        __lowerCAmelCase , __lowerCAmelCase : str = accelerator.gather((ddp_input, ddp_target) )
        __lowerCAmelCase , __lowerCAmelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(__lowerCAmelCase ):
                step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else:
            # Sync grads
            step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        __lowerCAmelCase : Any = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any=False , SCREAMING_SNAKE_CASE :Optional[Any]=False ) -> List[Any]:
    """Check ``Accelerator.accumulate`` syncs grads only on accumulation
    boundaries and at the end of the dataloader.

    NOTE(review): the signature declares two parameters with the same name (a
    SyntaxError) and the body's variables are collapsed to
    ``__lowerCAmelCase`` by automated renaming — restore the original
    accelerate ``test_sync.py`` names before running.
    """
    __lowerCAmelCase : str = Accelerator(
        split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = get_training_setup(__lowerCAmelCase )
    for iteration, batch in enumerate(__lowerCAmelCase ):
        __lowerCAmelCase , __lowerCAmelCase : str = batch.values()
        # Gather the distributed inputs and targs for the base model
        __lowerCAmelCase , __lowerCAmelCase : Any = accelerator.gather((ddp_input, ddp_target) )
        __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(__lowerCAmelCase ):
            step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        __lowerCAmelCase : Tuple = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
    GradientState._reset_state()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :List[Any]=False ) -> Optional[int]:
    """Check gradient accumulation with an optimizer and LR scheduler: the
    learning rates of the plain and prepared sides must stay aligned.

    NOTE(review): the signature declares two parameters with the same name (a
    SyntaxError) and the body mixes collapsed ``__lowerCAmelCase`` names with
    real ones (``opt``, ``sched``, ``split_batches``) — restore the original
    accelerate ``test_sync.py`` names before running.
    """
    __lowerCAmelCase : Tuple = Accelerator(
        split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = get_training_setup(__lowerCAmelCase , __lowerCAmelCase )
    for iteration, batch in enumerate(__lowerCAmelCase ):
        __lowerCAmelCase , __lowerCAmelCase : Dict = batch.values()
        # Gather the distributed inputs and targs for the base model
        __lowerCAmelCase , __lowerCAmelCase : List[str] = accelerator.gather((ddp_input, ddp_target) )
        __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(__lowerCAmelCase ):
            step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        __lowerCAmelCase : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase ))
        if accelerator.num_processes > 1:
            check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
    GradientState._reset_state()
def _SCREAMING_SNAKE_CASE ( ) -> Any:
    """Check ``GradientState.active_dataloader`` / ``end_of_dataloader``
    bookkeeping, including a nested second dataloader iteration.

    NOTE(review): automated renaming collapsed distinct variables into
    ``__lowerCAmelCase`` (e.g. the Accelerator ends up passed to DataLoader)
    and ``accelerator`` is unbound as written — restore the original
    accelerate ``test_sync.py`` names before running.
    """
    __lowerCAmelCase : Optional[Any] = Accelerator()
    __lowerCAmelCase : Optional[Any] = RegressionDataset(length=80 )
    __lowerCAmelCase : Optional[Any] = DataLoader(__lowerCAmelCase , batch_size=16 )
    __lowerCAmelCase : Dict = RegressionDataset(length=96 )
    __lowerCAmelCase : Dict = DataLoader(__lowerCAmelCase , batch_size=16 )
    __lowerCAmelCase , __lowerCAmelCase : List[str] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(__lowerCAmelCase ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
        if iteration < len(__lowerCAmelCase ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(__lowerCAmelCase ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
                    if batch_num < len(__lowerCAmelCase ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """Entry point: dispatch the sync/accumulation tests per distributed type.

    NOTE(review): ``accelerator``/``state`` are unbound as written (automated
    renaming collapsed the assignments into ``__lowerCAmelCase``), and the
    called test functions (``test_dataloader_break`` etc.) are not bound under
    those names in this module — verify against the original accelerate
    ``test_sync.py`` before running.
    """
    __lowerCAmelCase : List[Any] = Accelerator()
    __lowerCAmelCase : Dict = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
        test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(__lowerCAmelCase )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(__lowerCAmelCase )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any ) -> str:
    """XLA spawn entry point; the argument is the process index (unused).

    NOTE(review): calls ``main()``, but the entry point above is bound to
    ``_SCREAMING_SNAKE_CASE`` after automated renaming — verify before use.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Bug fix: removed a stray " | 371 |" artifact that trailed this call.
    # NOTE(review): `main` is not bound under that name in this file (the entry
    # point above is `_SCREAMING_SNAKE_CASE` after automated renaming) — verify.
    main()
def _SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
# NOTE(review): `generate_large_matrix` is not bound under that name in this
# file (the generator above is `_SCREAMING_SNAKE_CASE` after automated
# renaming) — verify before running.
_UpperCAmelCase = generate_large_matrix()
# Test fixtures: small sorted-descending grids plus the large generated one.
# NOTE(review): this rebinds `_UpperCAmelCase`, discarding the grid above, and
# `grid` is an unresolved name here — looks like a renaming artifact.
_UpperCAmelCase = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[int]] ) -> None:
assert all(row == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for row in grid )
assert all(list(SCREAMING_SNAKE_CASE ) == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for col in zip(*SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[int] ) -> int:
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Tuple = len(SCREAMING_SNAKE_CASE ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__lowerCAmelCase : Any = (left + right) // 2
__lowerCAmelCase : Optional[int] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[int]] ) -> int:
__lowerCAmelCase : Any = 0
__lowerCAmelCase : Union[str, Any] = len(grid[0] )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__lowerCAmelCase : str = find_negative_index(grid[i][:bound] )
total += bound
return (len(SCREAMING_SNAKE_CASE ) * len(grid[0] )) - total
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :list[list[int]] ) -> int:
__lowerCAmelCase : List[str] = 0
for row in grid:
for i, number in enumerate(SCREAMING_SNAKE_CASE ):
if number < 0:
total += len(SCREAMING_SNAKE_CASE ) - i
break
return total
def _SCREAMING_SNAKE_CASE ( ) -> None:
    """Benchmark the three counting strategies with ``timeit``.

    NOTE(review): the setup string imports ``count_negatives_binary_search``
    etc. and ``grid`` from ``__main__``, but after automated renaming those
    names are not bound in this module, and ``setup=SCREAMING_SNAKE_CASE`` /
    ``{time:...}`` reference unbound names — verify before running.
    """
    from timeit import timeit

    print("""Running benchmarks""" )
    __lowerCAmelCase : Tuple = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        __lowerCAmelCase : Optional[int] = timeit(F'''{func}(grid=grid)''' , setup=SCREAMING_SNAKE_CASE , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: removed a stray " | 232 | 0 |" artifact that trailed this call.
    # NOTE(review): `benchmark` is not bound under that name in this file (the
    # benchmark function above is `_SCREAMING_SNAKE_CASE`) — verify first.
    benchmark()
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
    """Fixture builder for the ViT-hybrid model tests (configs, inputs, checks).

    NOTE(review): automated renaming collapsed every ``__init__`` parameter to
    ``lowercase`` (duplicate argument names are a SyntaxError) while the body
    still reads the original names (``parent``, ``batch_size``, ...) — restore
    the real parameter list before running.
    """

    def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=[1, 16, 4, 4] , lowercase=None , ) -> List[Any]:
        """Store the test hyperparameters and derive the expected sequence length."""
        a__ : Optional[int] = parent
        a__ : Optional[int] = batch_size
        a__ : Any = image_size
        a__ : Optional[Any] = patch_size
        a__ : Optional[Any] = num_channels
        a__ : int = is_training
        a__ : List[str] = use_labels
        a__ : List[str] = hidden_size
        a__ : Tuple = num_hidden_layers
        a__ : Optional[Any] = num_attention_heads
        a__ : Union[str, Any] = intermediate_size
        a__ : Optional[int] = hidden_act
        a__ : Optional[Any] = hidden_dropout_prob
        a__ : Any = attention_probs_dropout_prob
        a__ : Any = type_sequence_label_size
        a__ : Tuple = initializer_range
        a__ : Tuple = scope
        a__ : int = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        a__ : Any = (self.image_size // 32) ** 2
        a__ : List[Any] = num_patches + 1

    def __lowercase ( self) -> Any:
        """Build (config, pixel_values, labels) fixtures for one test case."""
        a__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a__ : int = None
        if self.use_labels:
            a__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        a__ : List[str] = self.get_config()
        return config, pixel_values, labels

    def __lowercase ( self) -> Dict:
        """Return a small ViTHybridConfig with a tiny BiT backbone config."""
        a__ : List[str] = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase , )

    def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
        """Check the base model's last_hidden_state shape."""
        a__ : List[str] = ViTHybridModel(config=lowercase)
        model.to(lowercase)
        model.eval()
        a__ : Union[str, Any] = model(lowercase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __lowercase ( self , lowercase , lowercase , lowercase) -> Union[str, Any]:
        """Check the classification head's logits shape."""
        a__ : Dict = self.type_sequence_label_size
        a__ : Union[str, Any] = ViTHybridForImageClassification(lowercase)
        model.to(lowercase)
        model.eval()
        a__ : Tuple = model(lowercase , labels=lowercase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

    def __lowercase ( self) -> Any:
        """Return (config, inputs_dict) for the shared model-test harness."""
        a__ : List[Any] = self.prepare_config_and_inputs()
        a__ , a__ , a__ : Union[str, Any] = config_and_inputs
        a__ : str = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common + pipeline test-suite wiring for ViT-hybrid models.

    NOTE(review): the base classes are the unresolved placeholder
    ``__UpperCAmelCase`` (presumably ModelTesterMixin and PipelineTesterMixin),
    every class attribute is named ``__A`` (later ones shadow earlier ones),
    and local names are collapsed to ``a__``/``lowercase`` by automated
    renaming — verify against the original transformers test before running.
    """

    __A : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    __A : List[str] = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    __A : Any = False
    __A : Optional[int] = False
    __A : Optional[Any] = False

    def __lowercase ( self) -> Optional[Any]:
        """Instantiate the model tester and the config tester."""
        a__ : Any = ViTHybridModelTester(self)
        a__ : Any = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37)

    def __lowercase ( self) -> List[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def __lowercase ( self) -> Dict:
        """Skipped: ViT models take pixel_values, not inputs_embeds."""
        pass

    def __lowercase ( self) -> Optional[Any]:
        """Check input embeddings are a Module and output embeddings a Linear (or None)."""
        a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : str = model_class(lowercase)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            a__ : str = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase , nn.Linear))

    def __lowercase ( self) -> int:
        """Check the forward signature starts with pixel_values."""
        a__ , a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : Union[str, Any] = model_class(lowercase)
            a__ : Union[str, Any] = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a__ : Optional[Any] = [*signature.parameters.keys()]
            a__ : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowercase)

    def __lowercase ( self) -> Any:
        """Exercise the base-model shape check."""
        a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase)

    def __lowercase ( self) -> Optional[Any]:
        """Exercise the image-classification shape check."""
        a__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase)

    def __lowercase ( self) -> Dict:
        """Check parameter initialization (backbone weights excluded)."""
        a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        a__ : Tuple = _config_zero_init(lowercase)
        for model_class in self.all_model_classes:
            a__ : List[Any] = model_class(config=lowercase)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    a__ : Dict = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )

    @slow
    def __lowercase ( self) -> Any:
        """Smoke-test from_pretrained on the reference checkpoint."""
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : Optional[Any] = ViTHybridModel.from_pretrained(lowercase)
            self.assertIsNotNone(lowercase)
def A_ ( ) -> int:
    """Load the COCO cats test image used by the integration tests.

    Bug fix: the original assigned the opened image to one name and returned
    an undefined name ``image``, raising NameError.
    """
    img = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return img
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
    """Slow integration tests running a real ViT-hybrid checkpoint on a COCO image.

    NOTE(review): local names are collapsed to ``a__``/``lowercase`` by
    automated renaming, so several calls receive the wrong object as written
    (e.g. ``.to(lowercase)`` with no such binding) — verify against the
    original transformers test before running.
    """

    @cached_property
    def __lowercase ( self) -> Optional[Any]:
        """Image processor for the reference checkpoint (None without vision extras)."""
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def __lowercase ( self) -> Any:
        """Check the classification logits on the COCO cats image."""
        a__ : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            lowercase)
        a__ : List[str] = self.default_image_processor
        a__ : List[Any] = prepare_img()
        a__ : Any = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
        # forward pass
        with torch.no_grad():
            a__ : Optional[Any] = model(**lowercase)
        # verify the logits
        a__ : Optional[Any] = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , lowercase)
        a__ : Any = torch.tensor([-1.90_90, -0.49_93, -0.23_89]).to(lowercase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))

    @slow
    @require_accelerate
    def __lowercase ( self) -> Optional[int]:
        """Check `device_map="auto"` loading predicts the tabby-cat class."""
        a__ : List[str] = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        a__ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto')
        a__ : Any = prepare_img()
        a__ : str = image_processor(images=lowercase , return_tensors='pt')
        a__ : List[Any] = model(**lowercase)
        a__ : int = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        a__ : List[str] = logits.argmax(-1).item()
        self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat')
| 99 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Fallback so the module imports when vision extras are missing.
    # NOTE(review): the stand-in is named `lowerCAmelCase__`, not `Image`, so it
    # never actually shadows the missing import — looks like a renaming artifact.
    class lowerCAmelCase__ :
        """No-op stand-in for PIL.Image used when vision support is unavailable."""

        @staticmethod
        def UpperCAmelCase__ ( *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
            """Accept any arguments and do nothing (replacement for ``Image.open``)."""
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
__SCREAMING_SNAKE_CASE , {
"""score""": ANY(__SCREAMING_SNAKE_CASE ),
"""label""": ANY(__SCREAMING_SNAKE_CASE ),
"""box""": {"""xmin""": ANY(__SCREAMING_SNAKE_CASE ), """ymin""": ANY(__SCREAMING_SNAKE_CASE ), """xmax""": ANY(__SCREAMING_SNAKE_CASE ), """ymax""": ANY(__SCREAMING_SNAKE_CASE )},
} , )
import datasets
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__SCREAMING_SNAKE_CASE = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__SCREAMING_SNAKE_CASE = object_detector(__SCREAMING_SNAKE_CASE , threshold=0.0 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for outputs in batch_outputs:
self.assertGreater(len(__SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
__SCREAMING_SNAKE_CASE , {
"""score""": ANY(__SCREAMING_SNAKE_CASE ),
"""label""": ANY(__SCREAMING_SNAKE_CASE ),
"""box""": {"""xmin""": ANY(__SCREAMING_SNAKE_CASE ), """ymin""": ANY(__SCREAMING_SNAKE_CASE ), """xmax""": ANY(__SCREAMING_SNAKE_CASE ), """ymax""": ANY(__SCREAMING_SNAKE_CASE )},
} , )
    @require_tf
    @unittest.skip("""Object detection not implemented in TF""" )
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
        """Skipped: the object-detection pipeline has no TensorFlow implementation."""
        pass
    @require_torch
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """Deterministic tiny-model check: at threshold 0.0 the tiny DETR checkpoint should
        emit the same fixed boxes for one image and for a two-image batch.

        NOTE(review): locals were collapsed to `__SCREAMING_SNAKE_CASE` by an automated
        rewrite; `object_detector` is unbound as written. TODO: restore from upstream.
        """
        __SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-detr-mobilenetsv3"""
        __SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
                {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
            ] , )
        __SCREAMING_SNAKE_CASE = object_detector(
            [
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
            ] , threshold=0.0 , )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
                    {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
                ],
                [
                    {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
                    {"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
                ],
            ] , )
    @require_torch
    @slow
    def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
        """Integration check: full-size DETR (facebook/detr-resnet-50) detections on the
        COCO cats image, single and batched, using default threshold.

        NOTE(review): locals were collapsed to `__SCREAMING_SNAKE_CASE` by an automated
        rewrite; `object_detector` is unbound as written. TODO: restore from upstream.
        """
        __SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
        __SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
                {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
                {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
                {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
            ] , )
        __SCREAMING_SNAKE_CASE = object_detector(
            [
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
            ] )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
                    {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
                    {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
                    {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                    {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
                ],
                [
                    {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
                    {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
                    {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
                    {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                    {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
                ],
            ] , )
    @require_torch
    @slow
    def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
        """Integration check: full-size DETR constructed via the `pipeline(...)` factory,
        expecting the same fixed detections on the COCO cats image, single and batched.

        NOTE(review): locals were collapsed to `__SCREAMING_SNAKE_CASE` by an automated
        rewrite; `object_detector` is unbound as written. TODO: restore from upstream.
        """
        __SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
        __SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
                {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
                {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
                {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
            ] , )
        __SCREAMING_SNAKE_CASE = object_detector(
            [
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
            ] )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
                    {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
                    {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
                    {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                    {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
                ],
                [
                    {"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
                    {"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
                    {"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
                    {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                    {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
                ],
            ] , )
    @require_torch
    @slow
    def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
        """Threshold filtering check: with threshold 0.9985 only the two highest-scoring
        detections (the cats) should survive.

        NOTE(review): locals were collapsed to `__SCREAMING_SNAKE_CASE` by an automated
        rewrite; `object_detector` is unbound as written. TODO: restore from upstream.
        """
        __SCREAMING_SNAKE_CASE = 0.9985
        __SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
        __SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__SCREAMING_SNAKE_CASE )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
                {"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
            ] , )
    @require_torch
    @require_pytesseract
    @slow
    def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
        """LayoutLMv3 check: an OCR-backed (pytesseract) detector run on an invoice image
        should return the two fixed I-ANSWER boxes at threshold 0.9993.

        NOTE(review): locals were collapsed to `__SCREAMING_SNAKE_CASE` by an automated
        rewrite; `object_detector` is unbound as written. TODO: restore from upstream.
        """
        __SCREAMING_SNAKE_CASE = """Narsil/layoutlmv3-finetuned-funsd"""
        __SCREAMING_SNAKE_CASE = 0.9993
        __SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = object_detector(
            """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
        self.assertEqual(
            nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
                {"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
            ] , )
| 267 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module-level constants for the fast RoBERTa tokenizer.
#
# Fix: the original bound the logger and all three constant dicts to the SAME name
# (`__snake_case`), so each assignment shadowed the previous one and the names the
# tokenizer class actually reads (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, logger) were undefined. Restore them.
logger = logging.get_logger(__name__)

# File names the slow tokenizer saves / the fast tokenizer can load.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Download locations of the vocab/merges/tokenizer files for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length supported by each checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class UpperCAmelCase_ ( __lowercase ):
    """Fast (tokenizers-library backed) RoBERTa tokenizer wrapper.

    NOTE(review): an automated rename collapsed every local to `lowerCAmelCase` and every
    class attribute to `lowerCamelCase`. Names read below — `pre_tok_state`,
    `pre_tok_class`, `tokenizer_component_instance`, `state`, `changes_to_apply`,
    `component_class`, `output`, `sep`, `cls`, and the `mask_token` property targeted by
    `@mask_token.setter` — are unbound as written. TODO: restore from upstream
    RobertaTokenizerFast before relying on this code.
    """

    # Class-level configuration consumed by PreTrainedTokenizerFast machinery.
    lowerCamelCase : str = VOCAB_FILES_NAMES
    lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase : Tuple = ['''input_ids''', '''attention_mask''']
    lowerCamelCase : Optional[int] = RobertaTokenizer

    def __init__( self : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]="replace" , UpperCAmelCase__ : int="<s>" , UpperCAmelCase__ : List[str]="</s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : List[Any]="<unk>" , UpperCAmelCase__ : str="<pad>" , UpperCAmelCase__ : Dict="<mask>" , UpperCAmelCase__ : int=False , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Dict , ) -> Dict:
        """Initialize the fast tokenizer, then patch the backend pre-tokenizer and
        post-processor so their `add_prefix_space` / `trim_offsets` flags match the
        requested settings."""
        super().__init__(
            UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
        # Rebuild the backend pre-tokenizer if its serialized state disagrees on
        # add_prefix_space.
        lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
            lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
            lowerCAmelCase = add_prefix_space
            lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
        lowerCAmelCase = add_prefix_space
        # Same treatment for the post-processor component, if one exists.
        lowerCAmelCase = 'post_processor'
        lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
        if tokenizer_component_instance:
            lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the post-processor class.
            if "sep" in state:
                lowerCAmelCase = tuple(state['sep'] )
            if "cls" in state:
                lowerCAmelCase = tuple(state['cls'] )
            lowerCAmelCase = False
            if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
                lowerCAmelCase = add_prefix_space
                lowerCAmelCase = True
            if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
                lowerCAmelCase = trim_offsets
                lowerCAmelCase = True
            # Only rebuild and reattach the component when a flag actually changed.
            if changes_to_apply:
                lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
                lowerCAmelCase = component_class(**UpperCAmelCase__ )
                setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )

    @property
    def __UpperCAmelCase ( self : Dict ) -> str:
        """Return the mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    # NOTE(review): `mask_token` is undefined here because the property above was renamed;
    # upstream this decorates the setter of a `mask_token` property.
    @mask_token.setter
    def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str ) -> str:
        """Set the mask token, wrapping plain strings in an lstrip-ing AddedToken so the
        mask absorbs the space before it."""
        lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
        lowerCAmelCase = value

    def __UpperCAmelCase ( self : List[Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ) -> BatchEncoding:
        """Batch-encode; pretokenized input requires add_prefix_space=True."""
        lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )

    def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> BatchEncoding:
        """Single-example encode; pretokenized input requires add_prefix_space=True."""
        lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )

    def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend tokenizer model files into the given directory."""
        lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
        return tuple(UpperCAmelCase__ )

    def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple=None ) -> int:
        """Add BOS/EOS around one sequence, or join two as <s> A </s></s> B </s>."""
        lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        """RoBERTa does not use token type ids, so return all zeros over the full
        special-token-decorated length."""
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 55 |
'''simple docstring'''
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeqaSeq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    # Fix: this name was imported twice on consecutive lines; list it once.
    SquadVaProcessor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
| 55 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A (unittest.TestCase ):
    """Slow Flax Stable Diffusion ControlNet integration tests (canny and openpose).

    NOTE(review): an automated rename collapsed method locals to `A__` and boolean/keyword
    arguments to `snake_case__` (undefined inside these zero-argument methods). Names read
    below — `controlnet_params`, `pipe`, `prompts`, `num_samples`, `canny_image`,
    `pose_image`, `rng`, `images`, `image_slice`, `output_slice`, `expected_slice` — are
    unbound as written. TODO: restore from upstream before running.
    """

    def a_ ( self : str ) -> List[Any]:
        """Collect garbage after each test to release accelerator memory promptly."""
        super().tearDown()
        gc.collect()

    def a_ ( self : str ) -> Union[str, Any]:
        """Canny-conditioned generation: run the SD1.5 + canny ControlNet pipeline across
        all devices and compare a 3x3 output slice against fixed expected values."""
        A__ = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=snake_case__ , dtype=jnp.bfloataa )
        A__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case__ , from_pt=snake_case__ , dtype=jnp.bfloataa )
        A__ = controlnet_params
        A__ = '''bird'''
        A__ = jax.device_count()
        A__ = pipe.prepare_text_inputs([prompts] * num_samples )
        A__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
        A__ = pipe.prepare_image_inputs([canny_image] * num_samples )
        # Fixed seed, one RNG per device, then replicate/shard inputs for pmap execution.
        A__ = jax.random.PRNGKey(0 )
        A__ = jax.random.split(snake_case__ , jax.device_count() )
        A__ = replicate(snake_case__ )
        A__ = shard(snake_case__ )
        A__ = shard(snake_case__ )
        A__ = pipe(
            prompt_ids=snake_case__ , image=snake_case__ , params=snake_case__ , prng_seed=snake_case__ , num_inference_steps=50 , jit=snake_case__ , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        A__ = images[0, 2_53:2_56, 2_53:2_56, -1]
        A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        A__ = jnp.array(
            [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def a_ ( self : int ) -> Optional[int]:
        """Pose-conditioned generation: same device-parallel flow with the openpose
        ControlNet and its own expected 3x3 slice."""
        A__ = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=snake_case__ , dtype=jnp.bfloataa )
        A__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=snake_case__ , from_pt=snake_case__ , dtype=jnp.bfloataa )
        A__ = controlnet_params
        A__ = '''Chef in the kitchen'''
        A__ = jax.device_count()
        A__ = pipe.prepare_text_inputs([prompts] * num_samples )
        A__ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
        A__ = pipe.prepare_image_inputs([pose_image] * num_samples )
        A__ = jax.random.PRNGKey(0 )
        A__ = jax.random.split(snake_case__ , jax.device_count() )
        A__ = replicate(snake_case__ )
        A__ = shard(snake_case__ )
        A__ = shard(snake_case__ )
        A__ = pipe(
            prompt_ids=snake_case__ , image=snake_case__ , params=snake_case__ , prng_seed=snake_case__ , num_inference_steps=50 , jit=snake_case__ , ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
        A__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        A__ = images[0, 2_53:2_56, 2_53:2_56, -1]
        A__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        A__ = jnp.array(
            [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Fix: this module logger was bound to `__snake_case`, which the class statements below
# immediately rebind — leaving `logger` (used by the pipeline's truncation warning)
# undefined. Bind the logger under the name the code actually reads.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ ):
    """Holds (optionally learnable) classifier-free-guidance embeddings for VQ-Diffusion.

    NOTE(review): an automated rename garbled this class — the base classes
    `lowerCamelCase__` are unbound, locals were collapsed to `UpperCAmelCase`, and
    `learnable`/`hidden_size`/`length` are read although the parameters are named
    `snake_case__`. Upstream this is `LearnedClassifierFreeSamplingEmbeddings(ModelMixin,
    ConfigMixin)` — TODO: restore before use.
    """

    @register_to_config
    def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> str:
        """Store the `learnable` flag; when learnable, require hidden_size/length and
        allocate a zero (length, hidden_size) table wrapped in a Parameter, else None."""
        super().__init__()
        UpperCAmelCase : Optional[Any] =learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            UpperCAmelCase : Any =torch.zeros(snake_case__ , snake_case__ )
        else:
            UpperCAmelCase : Union[str, Any] =None
        UpperCAmelCase : Optional[int] =torch.nn.Parameter(snake_case__ )
class __snake_case ( lowerCamelCase__ ):
    """VQ-Diffusion text-to-image pipeline: CLIP text encoding, discrete diffusion over
    VQ-VAE latent indices, truncation-rate sampling, then VQ-VAE decoding to images.

    NOTE(review): an automated rename collapsed locals to `UpperCAmelCase` and attribute
    names to `__lowerCamelCase`, so many names read below (`prompt_embeds`,
    `text_input_ids`, `batch_size`, `latents`, `sample`, `model_output`, `keep_mask`,
    `rv`, ...) are unbound as written, and this `class __snake_case` statement shadows the
    embeddings class defined immediately above under the same name. TODO: restore from
    upstream VQDiffusionPipeline.
    """

    # Component annotations consumed by DiffusionPipeline machinery.
    __lowerCamelCase : VQModel
    __lowerCamelCase : CLIPTextModel
    __lowerCamelCase : CLIPTokenizer
    __lowerCamelCase : TransformeraDModel
    __lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings
    __lowerCamelCase : VQDiffusionScheduler

    def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> int:
        """Register the six pipeline components with the DiffusionPipeline base."""
        super().__init__()
        self.register_modules(
            vqvae=snake_case__ , transformer=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , scheduler=snake_case__ , learned_classifier_free_sampling_embeddings=snake_case__ , )

    def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
        """Tokenize and CLIP-encode the prompt(s); when doing classifier-free guidance,
        also build (learned or empty-string) negative embeddings and concatenate
        [negative, positive] for a single forward pass."""
        UpperCAmelCase : int =len(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else 1
        # get prompt text embeddings
        UpperCAmelCase : Optional[int] =self.tokenizer(
            snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        UpperCAmelCase : int =text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about and drop tokens beyond CLIP's maximum sequence length.
            UpperCAmelCase : List[str] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            UpperCAmelCase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCAmelCase : List[Any] =self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        UpperCAmelCase : int =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case__ )
        # duplicate text embeddings for each generation per prompt
        UpperCAmelCase : int =prompt_embeds.repeat_interleave(snake_case__ , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the trained unconditional embeddings when available.
                UpperCAmelCase : Optional[int] =self.learned_classifier_free_sampling_embeddings.embeddings
                UpperCAmelCase : str =negative_prompt_embeds.unsqueeze(0 ).repeat(snake_case__ , 1 , 1 )
            else:
                # Otherwise encode empty strings as the unconditional branch.
                UpperCAmelCase : str =[''''''] * batch_size
                UpperCAmelCase : Tuple =text_input_ids.shape[-1]
                UpperCAmelCase : Optional[Any] =self.tokenizer(
                    snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , )
                UpperCAmelCase : Optional[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                UpperCAmelCase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case__ )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                UpperCAmelCase : Optional[Any] =negative_prompt_embeds.shape[1]
                UpperCAmelCase : Union[str, Any] =negative_prompt_embeds.repeat(1 , snake_case__ , 1 )
                UpperCAmelCase : Optional[Any] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCAmelCase : int =torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , snake_case__ , snake_case__ = 100 , snake_case__ = 5.0 , snake_case__ = 1.0 , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , snake_case__ = None , snake_case__ = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images: validate inputs, start from fully-masked latent indices (or the
        supplied latents), denoise with the transformer under classifier-free guidance and
        truncation sampling, then decode through the VQ-VAE."""
        if isinstance(snake_case__ , snake_case__ ):
            UpperCAmelCase : Optional[int] =1
        elif isinstance(snake_case__ , snake_case__ ):
            UpperCAmelCase : Tuple =len(snake_case__ )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}''' )
        UpperCAmelCase : Tuple =batch_size * num_images_per_prompt
        # Guidance is only active when the scale exceeds 1.
        UpperCAmelCase : List[str] =guidance_scale > 1.0
        UpperCAmelCase : List[Any] =self._encode_prompt(snake_case__ , snake_case__ , snake_case__ )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(snake_case__ )}.''' )
        # get the initial completely masked latents unless the user supplied it
        UpperCAmelCase : int =(batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The last vector-embedding index is the mask token for discrete diffusion.
            UpperCAmelCase : Union[str, Any] =self.transformer.num_vector_embeds - 1
            UpperCAmelCase : str =torch.full(snake_case__ , snake_case__ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            UpperCAmelCase : Any =latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(snake_case__ , device=self.device )
        UpperCAmelCase : Any =self.scheduler.timesteps.to(self.device )
        UpperCAmelCase : Optional[int] =latents
        for i, t in enumerate(self.progress_bar(snake_case__ ) ):
            # expand the sample if we are doing classifier free guidance
            UpperCAmelCase : Optional[Any] =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            UpperCAmelCase : Optional[int] =self.transformer(snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ ).sample
            if do_classifier_free_guidance:
                # Combine unconditional and conditional halves, then renormalize in log space.
                UpperCAmelCase , UpperCAmelCase : str =model_output.chunk(2 )
                UpperCAmelCase : Optional[int] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(snake_case__ , dim=1 , keepdim=snake_case__ )
            UpperCAmelCase : Tuple =self.truncate(snake_case__ , snake_case__ )
            # remove `log(0)`'s (`-inf`s)
            UpperCAmelCase : Optional[Any] =model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase : int =self.scheduler.step(snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(snake_case__ , snake_case__ , snake_case__ )
        # Decode the final latent indices through the VQ-VAE codebook into pixel space.
        UpperCAmelCase : Optional[int] =self.vqvae.config.vq_embed_dim
        UpperCAmelCase : Optional[Any] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        UpperCAmelCase : Dict =self.vqvae.quantize.get_codebook_entry(snake_case__ , shape=snake_case__ )
        UpperCAmelCase : Tuple =self.vqvae.decode(snake_case__ , force_not_quantize=snake_case__ ).sample
        UpperCAmelCase : Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1 )
        UpperCAmelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=snake_case__ )

    def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> torch.FloatTensor:
        """Truncation sampling: keep only the highest log-probabilities whose cumulative
        probability stays below the truncation rate (always keeping the top one), zeroing
        out (-inf in log space) everything else."""
        UpperCAmelCase , UpperCAmelCase : int =torch.sort(snake_case__ , 1 , descending=snake_case__ )
        UpperCAmelCase : Union[str, Any] =torch.exp(snake_case__ )
        UpperCAmelCase : Union[str, Any] =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        UpperCAmelCase : Optional[Any] =torch.full_like(keep_mask[:, 0:1, :] , snake_case__ )
        UpperCAmelCase : Tuple =torch.cat((all_true, keep_mask) , dim=1 )
        UpperCAmelCase : int =keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original class ordering.
        UpperCAmelCase : int =keep_mask.gather(1 , indices.argsort(1 ) )
        UpperCAmelCase : Dict =log_p_x_0.clone()
        UpperCAmelCase : List[Any] =-torch.inf  # -inf = log(0)
        return rv
| 348 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Conditional slow-tokenizer import plus module constants for the fast Pegasus tokenizer.
#
# Fix: the original rebound a single name (`_a`) five times — the sentinel None, the
# logger, the sentencepiece marker, and the constant dicts — so each assignment shadowed
# the previous one, and the names the class below reads (PegasusTokenizer, logger,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES)
# were undefined. Restore them.
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Without sentencepiece there is no slow tokenizer to fall back on.
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

# Word-boundary marker used by the underlying sentencepiece unigram model.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

# Maximum sequence length supported by each checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class __A ( SCREAMING_SNAKE_CASE_ ):
    """Fast (tokenizers-library backed) Pegasus tokenizer wrapper.

    NOTE(review): an automated rename collapsed locals to `_lowerCAmelCase` and class
    attributes to `_UpperCamelCase`; names read below (`offset`,
    `additional_special_tokens_extended`, `mask_token_sent`, the VOCAB_* constants) are
    unbound as written. TODO: restore from upstream PegasusTokenizerFast.
    """

    # Class-level configuration consumed by PreTrainedTokenizerFast machinery.
    _UpperCamelCase : Tuple = VOCAB_FILES_NAMES
    _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : List[str] = PegasusTokenizer
    _UpperCamelCase : List[Any] = ["input_ids", "attention_mask"]

    def __init__( self , a__=None , a__=None , a__="<pad>" , a__="</s>" , a__="<unk>" , a__="<mask_2>" , a__="<mask_1>" , a__=None , a__=103 , **a__ , ):
        """Initialize, validating/extending `additional_special_tokens` so that together
        with `mask_token_sent` they fill all `offset` reserved ids with <unk_N> tokens."""
        _lowerCAmelCase : Optional[int] = offset
        if additional_special_tokens is not None:
            if not isinstance(a__ , a__ ):
                raise TypeError(
                    F"additional_special_tokens should be of type {type(a__ )}, but is"
                    F" {type(a__ )}" )
            _lowerCAmelCase : Any = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"<unk_{i}>" for i in range(len(a__ ) , self.offset - 1 )
            ]
            if len(set(a__ ) ) != len(a__ ):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
            _lowerCAmelCase : Dict = additional_special_tokens_extended
        else:
            _lowerCAmelCase : Any = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
        super().__init__(
            a__ , tokenizer_file=a__ , pad_token=a__ , eos_token=a__ , unk_token=a__ , mask_token=a__ , mask_token_sent=a__ , offset=a__ , additional_special_tokens=a__ , **a__ , )
        _lowerCAmelCase : Optional[int] = vocab_file
        # A slow->fast conversion is only possible when the sentencepiece file is present.
        _lowerCAmelCase : List[Any] = False if not self.vocab_file else True

    def __A ( self , a__ ):
        """Return a 0/1 mask marking special tokens in `seq`, after sanity-checking that
        exactly mask/pad/eos plus the additional specials are registered."""
        _lowerCAmelCase : List[str] = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                """There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
                F" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
        return [1 if x in all_special_ids else 0 for x in seq]

    def __A ( self , a__ , a__ = None , a__ = False ):
        """Special-tokens mask for one or two sequences; appends the EOS position(s) when
        the ids do not already contain special tokens."""
        if already_has_special_tokens:
            return self._special_token_mask(a__ )
        elif token_ids_a is None:
            return self._special_token_mask(a__ ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def __A ( self , a__ , a__=None ):
        """Append EOS; Pegasus does not use a BOS token."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def __A ( self , a__ , a__ = None ):
        """Copy the sentencepiece model into `save_directory` (requires the slow vocab
        file to be available)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(a__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        _lowerCAmelCase : List[Any] = os.path.join(
            a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
            copyfile(self.vocab_file , a__ )
        return (out_vocab_file,)
| 362 | """simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_a : Union[str, Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
    """Helper that builds Autoformer configs and random model inputs for the tests below.

    NOTE(review): identifiers are machine-mangled (`__A` methods, duplicated
    `a__` parameters, locals collapsed to `_lowerCAmelCase`); roles below are
    inferred from the method bodies — confirm against the original
    ``AutoformerModelTester`` in transformers.
    """

    def __init__( self , a__ , a__=16 , a__=13 , a__=7 , a__=14 , a__=10 , a__=19 , a__=5 , a__=4 , a__=True , a__=16 , a__=2 , a__=4 , a__=4 , a__="gelu" , a__=0.1 , a__=0.1 , a__=[1, 2, 3, 4, 5] , a__=25 , a__=5 , ):
        # NOTE(review): duplicate `a__` parameter names cannot compile as written.
        _lowerCAmelCase : Union[str, Any] = d_model
        _lowerCAmelCase : int = parent
        _lowerCAmelCase : List[Any] = batch_size
        _lowerCAmelCase : Optional[int] = prediction_length
        _lowerCAmelCase : int = context_length
        _lowerCAmelCase : Optional[Any] = cardinality
        _lowerCAmelCase : Tuple = num_time_features
        _lowerCAmelCase : str = lags_sequence
        _lowerCAmelCase : int = embedding_dimension
        _lowerCAmelCase : Dict = is_training
        _lowerCAmelCase : Tuple = hidden_size
        _lowerCAmelCase : Dict = num_hidden_layers
        _lowerCAmelCase : str = num_attention_heads
        _lowerCAmelCase : List[str] = intermediate_size
        _lowerCAmelCase : int = hidden_act
        _lowerCAmelCase : Tuple = hidden_dropout_prob
        _lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
        _lowerCAmelCase : Tuple = context_length
        # Autoformer's decoder consumes label_length of context plus the prediction window
        _lowerCAmelCase : Optional[int] = prediction_length + label_length
        _lowerCAmelCase : Dict = label_length
        _lowerCAmelCase : Dict = moving_average
        _lowerCAmelCase : Union[str, Any] = autocorrelation_factor

    def __A ( self ):
        """Build an AutoformerConfig from the tester's hyper-parameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def __A ( self , a__ ):
        """Create random past/future tensors shaped to match the given config."""
        # past window must cover the longest configured lag
        _lowerCAmelCase : Dict = config.context_length + max(config.lags_sequence )
        _lowerCAmelCase : int = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        _lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        _lowerCAmelCase : int = floats_tensor([self.batch_size, _past_length] )
        _lowerCAmelCase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        _lowerCAmelCase : Any = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        _lowerCAmelCase : Dict = floats_tensor([self.batch_size, config.prediction_length] )
        _lowerCAmelCase : Dict = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict

    def __A ( self ):
        """Return a (config, inputs_dict) pair."""
        _lowerCAmelCase : Any = self.get_config()
        _lowerCAmelCase : str = self.prepare_autoformer_inputs_dict(a__ )
        return config, inputs_dict

    def __A ( self ):
        # thin alias kept for the common-test API
        _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __A ( self , a__ , a__ ):
        """Round-trip encoder and decoder through save_pretrained and check the
        standalone modules reproduce the full model's hidden states."""
        _lowerCAmelCase : Optional[int] = AutoformerModel(config=a__ ).to(a__ ).eval()
        _lowerCAmelCase : int = model(**a__ )
        _lowerCAmelCase : List[str] = outputs.encoder_last_hidden_state
        _lowerCAmelCase : Any = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : Optional[int] = model.get_encoder()
            encoder.save_pretrained(a__ )
            _lowerCAmelCase : Optional[int] = AutoformerEncoder.from_pretrained(a__ ).to(a__ )
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = model.create_network_inputs(**a__ )
        _lowerCAmelCase , _lowerCAmelCase : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        # encoder input = raw context features concatenated with time features
        _lowerCAmelCase : Any = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        _lowerCAmelCase : Dict = encoder(inputs_embeds=a__ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        # seasonal/trend decoder inputs below mirror AutoformerModel.forward
        _lowerCAmelCase : str = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        _lowerCAmelCase : Optional[Any] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        _lowerCAmelCase : Optional[int] = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        _lowerCAmelCase : Optional[int] = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase : Optional[int] = model.get_decoder()
            decoder.save_pretrained(a__ )
            _lowerCAmelCase : Any = AutoformerDecoder.from_pretrained(a__ ).to(a__ )
        _lowerCAmelCase : List[Any] = decoder(
            trend=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common model tests for Autoformer (save/load, signatures, attentions).

    NOTE(review): identifiers are machine-mangled (methods all named `__A`,
    class attributes all `_UpperCamelCase`, locals `_lowerCAmelCase`,
    arguments `a__`); several statements reference names never bound as
    written.  Compare with the upstream ``AutoformerModelTest``.
    """

    _UpperCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    _UpperCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
    _UpperCamelCase : int = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : Optional[int] = False
    _UpperCamelCase : Optional[int] = False
    _UpperCamelCase : List[Any] = False
    _UpperCamelCase : List[Any] = False
    _UpperCamelCase : str = False

    def __A ( self ):
        # set up the model tester and the shared config tester
        _lowerCAmelCase : Tuple = AutoformerModelTester(self )
        _lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ )

    def __A ( self ):
        self.config_tester.run_common_tests()

    def __A ( self ):
        # save each model class to disk, reload it, and assert no missing keys
        _lowerCAmelCase , _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : int = model_class(a__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(a__ )
                _lowerCAmelCase , _lowerCAmelCase : List[str] = model_class.from_pretrained(a__ , output_loading_info=a__ )
            self.assertEqual(info["""missing_keys"""] , [] )

    def __A ( self ):
        _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*a__ )

    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def __A ( self ):
        pass

    def __A ( self ):
        # main_input_name must match the first forward() argument after `self`
        _lowerCAmelCase : Dict = inspect.signature(getattr(a__ , """forward""" ) )
        # The main input is the name of the argument after `self`
        _lowerCAmelCase : Dict = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , a__ )

    def __A ( self ):
        # verify the ordered forward() signature of every model class
        _lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCAmelCase : List[Any] = model_class(a__ )
            _lowerCAmelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCAmelCase : Tuple = [*signature.parameters.keys()]
            _lowerCAmelCase : List[str] = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )
            self.assertListEqual(arg_names[: len(a__ )] , a__ )

    def __A ( self ):
        # attention-output shapes and counts for encoder/decoder/cross attentions
        _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCAmelCase : Optional[int] = True
        _lowerCAmelCase : Optional[Any] = getattr(self.model_tester , """seq_length""" , a__ )
        _lowerCAmelCase : List[str] = getattr(self.model_tester , """decoder_seq_length""" , a__ )
        _lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , a__ )
        _lowerCAmelCase : int = getattr(self.model_tester , """d_model""" , a__ )
        _lowerCAmelCase : Optional[Any] = getattr(self.model_tester , """num_attention_heads""" , a__ )
        _lowerCAmelCase : Optional[int] = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            _lowerCAmelCase : Dict = True
            _lowerCAmelCase : Dict = False
            _lowerCAmelCase : Optional[int] = True
            _lowerCAmelCase : List[Any] = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(a__ , a__ ) )
            _lowerCAmelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _lowerCAmelCase : int = True
            _lowerCAmelCase : Optional[Any] = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : int = model(**self._prepare_for_class(a__ , a__ ) )
            _lowerCAmelCase : Optional[int] = outputs.encoder_attentions
            self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            _lowerCAmelCase : Dict = len(a__ )
            # base output count; optional entries below each add one
            _lowerCAmelCase : List[str] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(a__ , a__ )
            # decoder attentions
            _lowerCAmelCase : int = outputs.decoder_attentions
            self.assertIsInstance(a__ , (list, tuple) )
            self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            _lowerCAmelCase : Optional[Any] = outputs.cross_attentions
            self.assertIsInstance(a__ , (list, tuple) )
            self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            _lowerCAmelCase : Dict = True
            _lowerCAmelCase : List[str] = True
            _lowerCAmelCase : Optional[int] = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                _lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(a__ , a__ ) )
            self.assertEqual(out_len + 2 , len(a__ ) )
            _lowerCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def __A ( self ):
        super().test_retain_grad_hidden_states_attentions()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any="train-batch.pt" ) -> Optional[int]:
    """Download `_lowerCamelCase` from the tourism-monthly test dataset repo and load it.

    Args:
        _lowerCamelCase: filename inside the ``hf-internal-testing/tourism-monthly-batch``
            dataset repo (default: the training batch).

    Returns:
        The deserialized batch, mapped onto the test device.

    Fixes: the original called ``torch.load`` on the bare filename instead of
    the local path returned by ``hf_hub_download``, and passed that filename
    as ``map_location``.
    """
    downloaded_file = hf_hub_download(
        repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCamelCase , repo_type="""dataset""" )
    # map_location places the tensors on the device the test suite runs on
    batch = torch.load(downloaded_file , map_location=torch_device )
    return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
    """Slow integration tests against the `huggingface/autoformer-tourism-monthly` checkpoint.

    NOTE(review): method names were mangled to `__A` and locals to
    `_lowerCAmelCase`; several statements reference names never bound here.
    """

    def __A ( self ):
        # full forward pass; compare shape and a 3x3 slice against golden values
        _lowerCAmelCase : str = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
        _lowerCAmelCase : List[Any] = prepare_batch()
        with torch.no_grad():
            _lowerCAmelCase : str = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        _lowerCAmelCase : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , a__ )
        _lowerCAmelCase : Optional[int] = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=a__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , a__ , atol=a__ ) )

    def __A ( self ):
        # encoder-only inference for the prediction model
        _lowerCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
        _lowerCAmelCase : Union[str, Any] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            _lowerCAmelCase : str = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        _lowerCAmelCase : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , a__ )
        _lowerCAmelCase : Any = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=a__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , a__ , atol=a__ ) )

    def __A ( self ):
        # generation: check shape and mean prediction over sampled trajectories
        _lowerCAmelCase : List[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
        _lowerCAmelCase : str = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            _lowerCAmelCase : str = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        _lowerCAmelCase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , a__ )
        _lowerCAmelCase : Optional[Any] = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=a__ )
        _lowerCAmelCase : Optional[int] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , a__ , rtol=1e-1 ) )
| 126 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the T5 architecture.
        pytorch_dump_path: output directory for the converted PyTorch model.

    Fixes: the original declared three parameters all named ``lowerCamelCase__``
    (a SyntaxError) and referenced undefined locals.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # Command-line entry point: parse the three required paths and run the conversion.
    # Fixes: the original assigned the parser/args to `__A` but then used the
    # undefined names `parser`, `args`, and `convert_tf_checkpoint_to_pytorch`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    lowerCamelCase_(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 19 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __magic_name__ ( lowercase ):
return np.maximum(0 , lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 173 | 0 |
"""simple docstring"""
def __lowercase ( _a = 4_000_000 ):
snake_case_ : Dict = []
snake_case_ : List[str] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_a )
snake_case_ : str = b, a + b
return sum(_a )
if __name__ == "__main__":
print(f'{solution() = }')
| 352 |
"""simple docstring"""
import os
def __lowercase ( _a ):
snake_case_ : Tuple = len(grid[0] )
snake_case_ : Optional[int] = len(_a )
snake_case_ : Union[str, Any] = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : List[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_a ):
for j in range(n_rows - 3 ):
snake_case_ : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
snake_case_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
snake_case_ : Dict = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
snake_case_ : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
snake_case_ : List[str] = max(
_a , _a , _a , _a )
if max_product > largest:
snake_case_ : str = max_product
return largest
def __lowercase ( ):
    """Load ``grid.txt`` (whitespace-separated ints) and return the largest
    four-in-a-row product found in it.

    NOTE(review): identifiers were machine-mangled — the body references
    `_a` (undefined here; presumably ``__file__``), `grid` (assigned to
    `snake_case_` instead), and `largest_product` (presumably the helper
    above, which was itself renamed `__lowercase` and is shadowed by this
    definition).  Confirm against the original Project Euler #11 script.
    """
    snake_case_ : Tuple = []
    with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    # convert each row of strings into ints
    snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )]
    return largest_product(_a )


if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here (mangled name of the function above).
    print(solution())
| 155 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path ):
    """Convert an OpenAI GPT TensorFlow/numpy checkpoint into PyTorch files.

    Args:
        openai_checkpoint_folder_path: folder containing the original checkpoint.
        openai_config_file: optional JSON config; empty string uses defaults.
        pytorch_dump_folder_path: output folder for weights + config.

    Fixes: the original declared three parameters all named ``lowerCamelCase``
    (a SyntaxError) and referenced the undefined name ``__a`` throughout.
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict(), pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path, "w", encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # Command-line entry point for the OpenAI GPT conversion.
    # Fixes: the original assigned the parser/args to `_UpperCAmelCase` but then
    # used the undefined names `parser`, `args`, and
    # `convert_openai_checkpoint_to_pytorch`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    UpperCAmelCase__(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 236 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
    """Arguments selecting which pretrained model/config/tokenizer to fine-tune.

    NOTE(review): every field was mangled to the same name (`__magic_name__`)
    and defaults to the undefined `snake_case_`, so only the last declaration
    survives as written; the ``help`` metadata documents the intended fields
    (model_name_or_path, config_name, tokenizer_name, cache_dir,
    freeze_encoder, freeze_embeds) — confirm upstream.
    """

    __magic_name__: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} )
    __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} )
__magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
    """Arguments controlling data loading and sequence lengths for seq2seq training.

    NOTE(review): every field was mangled to the same name (`__magic_name__`)
    with the undefined default `snake_case_`; the ``help`` metadata documents
    the intended fields (data_dir, task, max_source_length,
    max_target_length, val/test_max_target_length, n_train/n_val/n_test,
    src_lang, tgt_lang, eval_beams, ignore_pad_token_for_loss) — confirm
    against the original seq2seq example.
    """

    __magic_name__: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    __magic_name__: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    __magic_name__: Optional[int] = field(
        default=1024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __magic_name__: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __magic_name__: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    __magic_name__: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} )
    __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} )
    __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} )
    __magic_name__: bool = field(
        default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def SCREAMING_SNAKE_CASE__ ( split , metrics , output_dir ):
    """Log the metrics for one split and persist them as ``<split>_results.json``.

    Args:
        split: split name ("train"/"val"/"test"), used for logging and the filename.
        metrics: mapping of metric name -> value.
        output_dir: directory to write the JSON file into.

    Fixes: the original declared three parameters all named ``__a``
    (a SyntaxError).
    """
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f""" {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE__ ( ):
    """Fine-tune / evaluate / predict a seq2seq model (summarization or translation)
    and return a dict of all collected metrics.

    NOTE(review): local names were machine-mangled (every local assigned to
    `snake_case_`, most call arguments replaced by the undefined `__a`), so
    many statements cannot run as written — compare with the original
    ``examples/legacy/seq2seq/finetune_trainer.py`` before relying on details.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses()
    check_output_dir(__a )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , __a )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case_ : Tuple = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # forward selected dropout/layerdrop overrides from training args onto the config
    snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(__a , __a , __a ):
            assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(__a , __a , getattr(__a , __a ) )
    snake_case_ : Tuple = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(__a , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        snake_case_ : Any = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(__a , __a ):
            snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(__a )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    snake_case_ : List[Any] = SeqaSeqDataset
    # Get datasets
    snake_case_ : List[Any] = (
        dataset_class(
            __a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_train
        else None
    )
    snake_case_ : List[str] = (
        dataset_class(
            __a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    snake_case_ : List[Any] = (
        dataset_class(
            __a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    snake_case_ : Any = (
        build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None
    )
    snake_case_ : List[str] = SeqaSeqTrainer(
        model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator(
            __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , )
    snake_case_ : Optional[int] = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***' )
        snake_case_ : Any = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        snake_case_ : Tuple = train_result.metrics
        snake_case_ : List[str] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train' , __a , training_args.output_dir )
            all_metrics.update(__a )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' )
        snake_case_ : str = data_args.n_val
        snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('val' , __a , training_args.output_dir )
            all_metrics.update(__a )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' )
        snake_case_ : Union[str, Any] = test_output.metrics
        snake_case_ : int = data_args.n_test
        if trainer.is_world_process_zero():
            snake_case_ : List[str] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , __a , training_args.output_dir )
            all_metrics.update(__a )
            if training_args.predict_with_generate:
                snake_case_ : Any = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
                snake_case_ : Any = lmap(str.strip , __a )
                write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
    if trainer.is_world_process_zero():
        save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) )
    return all_metrics
def SCREAMING_SNAKE_CASE__ ( __a ):
    """Entry point for ``xla_spawn`` (TPU launcher).

    Each spawned process receives its process index as ``__a`` (unused) and
    simply runs ``main()``.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Standard script guard: run the training entry point when executed directly.
    main()
| 327 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a ( unittest.TestCase ):
    """Configuration fixture used to build a ``LevitImageProcessor`` and the
    matching dummy inputs in the image-processing tests below.

    Note: the original (pre-obfuscation) ``__init__`` repeated the parameter
    name ``A_`` for every argument, which is a SyntaxError; the real parameter
    names are recovered from the attribute names the body reads.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # ``None`` means "use the processor's documented default".
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        # None sentinels avoid mutable-list default arguments.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def _UpperCAmelCase(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class a ( UpperCAmelCase , unittest.TestCase ):
    # Test suite for ``LevitImageProcessor``: attribute presence, from_dict
    # round-trips, and PIL / numpy / torch input handling.
    # NOTE(review): every method below is named ``_UpperCAmelCase`` so later
    # definitions shadow earlier ones; the base mixin ``UpperCAmelCase`` and
    # the fixture ``LevitImageProcessingTester`` are not defined under those
    # names in this file, and several call arguments were replaced by the bare
    # name ``A_`` — this file looks machine-renamed; confirm against the
    # original transformers Levit image-processing test module.
    _lowercase = LevitImageProcessor if is_vision_available() else None

    def _UpperCAmelCase ( self ):
        """setUp-style hook: create the shared configuration fixture."""
        _UpperCAmelCase : Dict = LevitImageProcessingTester(self )

    @property
    def _UpperCAmelCase ( self ):
        """Kwargs used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCAmelCase ( self ):
        """The processor exposes all expected configuration attributes."""
        _UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A_ , "image_mean" ) )
        self.assertTrue(hasattr(A_ , "image_std" ) )
        self.assertTrue(hasattr(A_ , "do_normalize" ) )
        self.assertTrue(hasattr(A_ , "do_resize" ) )
        self.assertTrue(hasattr(A_ , "do_center_crop" ) )
        self.assertTrue(hasattr(A_ , "size" ) )

    def _UpperCAmelCase ( self ):
        """from_dict honors defaults and accepts int overrides for sizes."""
        _UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        _UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def _UpperCAmelCase ( self ):
        """Intentionally empty placeholder (kept for mixin API parity)."""
        pass

    def _UpperCAmelCase ( self ):
        """Processing PIL images yields correctly-shaped pixel_values."""
        _UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , Image.Image )
        # Test not batched input
        _UpperCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCAmelCase : Union[str, Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def _UpperCAmelCase ( self ):
        """Processing numpy arrays yields correctly-shaped pixel_values."""
        _UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , np.ndarray )
        # Test not batched input
        _UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCAmelCase : int = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def _UpperCAmelCase ( self ):
        """Processing torch tensors yields correctly-shaped pixel_values."""
        _UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
        for image in image_inputs:
            self.assertIsInstance(A_ , torch.Tensor )
        # Test not batched input
        _UpperCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCAmelCase : Optional[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 189 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( UpperCAmelCase ):
    # Test suite for the PNDM (pseudo-numerical methods) diffusion scheduler.
    # NOTE(review): all methods are named ``_UpperCAmelCase`` so later
    # definitions shadow earlier ones, both class attributes are bound to the
    # same name ``_lowercase``, and two signatures below repeat the parameter
    # name ``A_`` ("self , A_=0 , **A_"), which is a SyntaxError — this file
    # looks machine-renamed; confirm against the original diffusers
    # PNDMScheduler test module before relying on any of it.
    _lowercase = (PNDMScheduler,)
    _lowercase = (("num_inference_steps", 5_0),)

    def _UpperCAmelCase ( self , **A_ ):
        """Default scheduler config, with kwargs applied as overrides."""
        _UpperCAmelCase : Tuple = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**A_ )
        return config

    # NOTE(review): duplicate parameter name ``A_`` below (SyntaxError).
    def _UpperCAmelCase ( self , A_=0 , **A_ ):
        """Save/from_pretrained round trip preserves step_prk/step_plms output."""
        _UpperCAmelCase : int = dict(self.forward_default_kwargs )
        _UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , A_ )
        _UpperCAmelCase : int = self.dummy_sample
        _UpperCAmelCase : Dict = 0.1 * sample
        _UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _UpperCAmelCase : Dict = self.get_scheduler_config(**A_ )
            _UpperCAmelCase : Any = scheduler_class(**A_ )
            scheduler.set_timesteps(A_ )
            # copy over dummy past residuals
            _UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A_ )
                _UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
                new_scheduler.set_timesteps(A_ )
                # copy over dummy past residuals
                _UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
            _UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
            _UpperCAmelCase : Any = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            _UpperCAmelCase : Union[str, Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
            _UpperCAmelCase : Optional[Any] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def _UpperCAmelCase ( self ):
        """Intentionally empty (overrides a base-class check not used here)."""
        pass

    # NOTE(review): duplicate parameter name ``A_`` below (SyntaxError).
    def _UpperCAmelCase ( self , A_=0 , **A_ ):
        """Round trip with residuals copied after set_timesteps."""
        _UpperCAmelCase : Any = dict(self.forward_default_kwargs )
        _UpperCAmelCase : int = kwargs.pop("num_inference_steps" , A_ )
        _UpperCAmelCase : int = self.dummy_sample
        _UpperCAmelCase : Any = 0.1 * sample
        _UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
            _UpperCAmelCase : Dict = scheduler_class(**A_ )
            scheduler.set_timesteps(A_ )
            # copy over dummy past residuals (must be after setting timesteps)
            _UpperCAmelCase : Any = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A_ )
                _UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A_ )
                # copy over dummy past residual (must be after setting timesteps)
                _UpperCAmelCase : List[Any] = dummy_past_residuals[:]
            _UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
            _UpperCAmelCase : Union[str, Any] = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            _UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
            _UpperCAmelCase : List[str] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def _UpperCAmelCase ( self , **A_ ):
        """Run a full PRK + PLMS sampling loop and return the final sample."""
        _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**A_ )
        _UpperCAmelCase : List[str] = scheduler_class(**A_ )
        _UpperCAmelCase : List[str] = 10
        _UpperCAmelCase : Optional[int] = self.dummy_model()
        _UpperCAmelCase : List[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(A_ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            _UpperCAmelCase : Dict = model(A_ , A_ )
            _UpperCAmelCase : int = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            _UpperCAmelCase : Any = model(A_ , A_ )
            _UpperCAmelCase : Any = scheduler.step_plms(A_ , A_ , A_ ).prev_sample
        return sample

    def _UpperCAmelCase ( self ):
        """step_prk/step_plms preserve sample shape across consecutive steps."""
        _UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
        _UpperCAmelCase : str = kwargs.pop("num_inference_steps" , A_ )
        for scheduler_class in self.scheduler_classes:
            _UpperCAmelCase : int = self.get_scheduler_config()
            _UpperCAmelCase : Optional[Any] = scheduler_class(**A_ )
            _UpperCAmelCase : int = self.dummy_sample
            _UpperCAmelCase : str = 0.1 * sample
            if num_inference_steps is not None and hasattr(A_ , "set_timesteps" ):
                scheduler.set_timesteps(A_ )
            elif num_inference_steps is not None and not hasattr(A_ , "set_timesteps" ):
                _UpperCAmelCase : Union[str, Any] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            _UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            _UpperCAmelCase : Any = dummy_past_residuals[:]
            _UpperCAmelCase : Any = scheduler.step_prk(A_ , 0 , A_ , **A_ ).prev_sample
            _UpperCAmelCase : Optional[Any] = scheduler.step_prk(A_ , 1 , A_ , **A_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            _UpperCAmelCase : Optional[int] = scheduler.step_plms(A_ , 0 , A_ , **A_ ).prev_sample
            _UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , 1 , A_ , **A_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _UpperCAmelCase ( self ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=A_ )

    def _UpperCAmelCase ( self ):
        """Config sweep over steps_offset; offset 1 yields the expected grid."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=A_ )
        _UpperCAmelCase : List[str] = self.scheduler_classes[0]
        _UpperCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
        _UpperCAmelCase : str = scheduler_class(**A_ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )

    def _UpperCAmelCase ( self ):
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
            self.check_over_configs(beta_start=A_ , beta_end=A_ )

    def _UpperCAmelCase ( self ):
        """Config sweep over beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=A_ )

    def _UpperCAmelCase ( self ):
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=A_ )

    def _UpperCAmelCase ( self ):
        """Forward sweep over single time steps."""
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=A_ )

    def _UpperCAmelCase ( self ):
        """Forward sweep over num_inference_steps values."""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=A_ )

    def _UpperCAmelCase ( self ):
        """Regression test for the power-of-3 timestep bug (27 steps)."""
        _UpperCAmelCase : int = 27
        for scheduler_class in self.scheduler_classes:
            _UpperCAmelCase : str = self.dummy_sample
            _UpperCAmelCase : str = 0.1 * sample
            _UpperCAmelCase : str = self.get_scheduler_config()
            _UpperCAmelCase : Tuple = scheduler_class(**A_ )
            scheduler.set_timesteps(A_ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                _UpperCAmelCase : Dict = scheduler.step_prk(A_ , A_ , A_ ).prev_sample

    def _UpperCAmelCase ( self ):
        """step_plms before set_timesteps must raise."""
        with self.assertRaises(A_ ):
            _UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
            _UpperCAmelCase : Dict = self.get_scheduler_config()
            _UpperCAmelCase : Union[str, Any] = scheduler_class(**A_ )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def _UpperCAmelCase ( self ):
        """Full-loop numerical regression check (default config)."""
        _UpperCAmelCase : Dict = self.full_loop()
        _UpperCAmelCase : int = torch.sum(torch.abs(A_ ) )
        _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(A_ ) )
        assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2
        assert abs(result_mean.item() - 0.25_80 ) < 1e-3

    def _UpperCAmelCase ( self ):
        """Full-loop numerical regression check (v_prediction)."""
        _UpperCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
        _UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
        _UpperCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) )
        assert abs(result_sum.item() - 67.39_86 ) < 1e-2
        assert abs(result_mean.item() - 0.08_78 ) < 1e-3

    def _UpperCAmelCase ( self ):
        """Full-loop regression check with set_alpha_to_one and beta_start=0.01."""
        _UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
        _UpperCAmelCase : Dict = torch.sum(torch.abs(A_ ) )
        _UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
        assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2
        assert abs(result_mean.item() - 0.29_95 ) < 1e-3

    def _UpperCAmelCase ( self ):
        """Companion regression check (other set_alpha_to_one setting)."""
        _UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
        _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
        _UpperCAmelCase : Dict = torch.mean(torch.abs(A_ ) )
        assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2
        assert abs(result_mean.item() - 0.24_34 ) < 1e-3
| 189 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class __snake_case ( a , unittest.TestCase ):
    # Test suite for the (slow + fast) Barthez tokenizers.
    # NOTE(review): every method is named ``lowerCamelCase`` so later
    # definitions shadow earlier ones, and several call arguments were replaced
    # by the bare name ``_snake_case`` — this file looks machine-renamed;
    # confirm against the original transformers Barthez tokenization tests.
    UpperCAmelCase__ : Dict = BarthezTokenizer
    UpperCAmelCase__ : Optional[Any] = BarthezTokenizerFast
    UpperCAmelCase__ : Union[str, Any] = True
    UpperCAmelCase__ : Optional[Any] = True

    def lowerCamelCase ( self : Optional[int]):
        """Download the pretrained fast tokenizer and cache it in tmpdirname."""
        super().setUp()
        UpperCAmelCase_ = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_snake_case)
        UpperCAmelCase_ = tokenizer

    def lowerCamelCase ( self : str):
        """<pad> maps to id 1 and back."""
        UpperCAmelCase_ = '''<pad>'''
        UpperCAmelCase_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)

    def lowerCamelCase ( self : Any):
        """Vocab ordering and size: <s>, <pad> first, <mask> last, 101122 total."""
        UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''<s>''')
        self.assertEqual(vocab_keys[1] , '''<pad>''')
        self.assertEqual(vocab_keys[-1] , '''<mask>''')
        self.assertEqual(len(_snake_case) , 101122)

    def lowerCamelCase ( self : Union[str, Any]):
        """vocab_size property matches the expected 101122."""
        self.assertEqual(self.get_tokenizer().vocab_size , 101122)

    @require_torch
    def lowerCamelCase ( self : Optional[Any]):
        """Batched tensor encoding produces the expected ids and shapes."""
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase_ = [0, 57, 3018, 70307, 91, 2]
        UpperCAmelCase_ = self.tokenizer(
            _snake_case , max_length=len(_snake_case) , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''')
        self.assertIsInstance(_snake_case , _snake_case)
        self.assertEqual((2, 6) , batch.input_ids.shape)
        self.assertEqual((2, 6) , batch.attention_mask.shape)
        UpperCAmelCase_ = batch.input_ids.tolist()[0]
        self.assertListEqual(_snake_case , _snake_case)

    def lowerCamelCase ( self : Optional[Any]):
        """Slow and fast tokenizers agree on tokenization and encoding."""
        if not self.test_rust_tokenizer:
            return
        UpperCAmelCase_ = self.get_tokenizer()
        UpperCAmelCase_ = self.get_rust_tokenizer()
        UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
        UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
        UpperCAmelCase_ = rust_tokenizer.tokenize(_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
        UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
        UpperCAmelCase_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
        UpperCAmelCase_ = self.get_rust_tokenizer()
        UpperCAmelCase_ = tokenizer.encode(_snake_case)
        UpperCAmelCase_ = rust_tokenizer.encode(_snake_case)
        self.assertListEqual(_snake_case , _snake_case)

    @slow
    def lowerCamelCase ( self : List[str]):
        """Integration check against a pinned model revision and encoding."""
        UpperCAmelCase_ = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        UpperCAmelCase_ = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_snake_case , )
| 51 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the BLIP model family: names are only imported
# when first accessed, and optional backends (vision / torch / TF) are probed
# with try/except so missing dependencies degrade gracefully.
# NOTE(review): the dict below is bound to ``SCREAMING_SNAKE_CASE__`` but the
# _LazyModule call at the bottom passes ``_import_structure`` — these names
# were presumably the same identifier originally; confirm.
SCREAMING_SNAKE_CASE__ : List[str] = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
# Vision-only objects (image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : str = ["BlipImageProcessor"]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : int = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Any = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
# Static-analysis path: real imports so type checkers see concrete symbols.
if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )
# Runtime path: replace this module with a lazy proxy.
else:
    import sys

    SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 0 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same name ``lowercase__``,
# so each assignment overwrites the previous one; later code references
# ``_CONFIG_FOR_DOC`` / ``_CHECKPOINT_FOR_DOC`` / ``_EXPECTED_OUTPUT_SHAPE``,
# which suggests these were distinct docstring constants originally — confirm.
# General docstring
lowercase__ : Any = '''PoolFormerConfig'''
# Base docstring
lowercase__ : Any = '''sail/poolformer_s12'''
lowercase__ : List[Any] = [1, 5_12, 7, 7]
# Image classification docstring
lowercase__ : str = '''sail/poolformer_s12'''
lowercase__ : Union[str, Any] = '''tabby, tabby cat'''
# Checkpoints with pretrained weights for this architecture.
lowercase__ : str = [
    '''sail/poolformer_s12''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __lowercase ( input , drop_prob = 0.0 , training = False ):
    """Drop paths (stochastic depth) per sample.

    The original (obfuscated) signature repeated ``_a`` three times, which is a
    SyntaxError; the parameter names are recovered from the body, which reads
    ``input``, ``drop_prob`` and ``training``.

    Args:
        input: tensor of shape (batch, ...) — assumes dim 0 is the batch axis.
        drop_prob: probability of zeroing a whole sample's path.
        training: only drop during training; identity otherwise.

    Returns:
        The input unchanged when ``drop_prob == 0`` or not training, otherwise
        the input rescaled by 1/keep_prob with whole samples randomly zeroed.
    """
    # Fast path: identity at eval time or with a zero drop rate.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # Mask shape (batch, 1, 1, ...) broadcasts over all non-batch dims,
    # working with tensors of any rank, not just 4-D conv feature maps.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize to 0/1
    # Divide by keep_prob so the expected activation magnitude is preserved.
    output = input.div(keep_prob ) * random_tensor
    return output
class _UpperCAmelCase ( nn.Module):
    """Stochastic-depth ("DropPath") layer: wraps the functional helper so a
    per-sample residual path can be randomly dropped during training."""

    def __init__( self : Optional[Any] , lowercase_ : Optional[float] = None ):
        super().__init__()
        # NOTE(review): the RHS name ``drop_prob`` does not exist in this scope
        # and the target does not assign to ``self`` — this looks like it was
        # ``self.drop_prob = lowercase_`` before machine renaming; confirm.
        snake_case_ : Optional[int] = drop_prob

    def _snake_case ( self : Dict , lowercase_ : torch.Tensor ):
        # Delegates to the module-level drop-path helper (identity at eval).
        # NOTE(review): ``drop_path`` is not defined under that name in this
        # file (the helper was renamed ``__lowercase``) — confirm.
        return drop_path(lowercase_ , self.drop_prob , self.training )

    def _snake_case ( self : Union[str, Any] ):
        # extra_repr-style helper showing the configured drop probability.
        # NOTE(review): shadows the forward method above (same name).
        return "p={}".format(self.drop_prob )
class _UpperCAmelCase ( nn.Module):
    """Patch-embedding stage: a strided convolution that projects an image (or
    feature map) to a grid of patch embeddings, with an optional norm.

    The original (obfuscated) ``__init__`` repeated the parameter name
    ``lowercase_`` six times (a SyntaxError); the real names are recovered from
    the keyword arguments used at the call site in the encoder
    (``patch_size= , stride= , padding= , num_channels= , hidden_size=``), and
    ``nn.Convad`` is corrected to ``nn.Conv2d``.
    """

    def __init__( self , patch_size , stride , padding , num_channels , hidden_size , norm_layer=None ):
        super().__init__()
        # Accept a single int or an explicit (h, w) pair for each geometry arg.
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        # Identity when no norm constructor is supplied.
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()

    def _snake_case ( self , pixel_values ):
        """Project pixels to patch embeddings and apply the (optional) norm."""
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class _UpperCAmelCase ( nn.GroupNorm):
    """GroupNorm with a single group, i.e. normalization over all channels of a
    (batch, channels, height, width) feature map.

    The original (obfuscated) ``__init__`` used ``lowercase_`` for both the
    positional parameter and ``**kwargs`` (a SyntaxError); the positional
    argument is the channel count forwarded to ``nn.GroupNorm``.
    """

    def __init__( self , num_channels , **kwargs ):
        # num_groups is fixed to 1; everything else forwards to nn.GroupNorm.
        super().__init__(1 , num_channels , **kwargs )
class _UpperCAmelCase ( nn.Module):
    """PoolFormer token mixer: average pooling minus the identity, so the
    module outputs only the "pooled minus input" residual; the caller adds the
    skip connection back.

    Fixes vs the obfuscated original: the parameter is named ``pool_size`` (the
    body already read that name), ``nn.AvgPoolad`` is corrected to
    ``nn.AvgPool2d``, and ``count_include_pad`` is ``False`` (the obfuscation
    had replaced the literal with the pool-size variable).
    """

    def __init__( self , pool_size ):
        super().__init__()
        # stride=1 with symmetric padding keeps the spatial size unchanged;
        # count_include_pad=False so border averages ignore the zero padding.
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )

    def _snake_case ( self , hidden_states ):
        # Subtract the input: only the mixing residual is returned.
        return self.pool(hidden_states ) - hidden_states
class _UpperCAmelCase ( nn.Module):
    """PoolFormer channel MLP: 1x1 conv -> activation -> dropout -> 1x1 conv ->
    dropout, applied to a conv feature map.

    Fixes vs the obfuscated original: the four ``lowercase_`` parameters (a
    SyntaxError) are restored to ``config, dropout_prob, hidden_size,
    intermediate_size``; the two convolutions (collapsed into one attribute
    name) become ``conv1``/``conv2``; ``nn.Convad`` -> ``nn.Conv2d``; and the
    isinstance check tests against ``str``.
    NOTE(review): ``PoolFormerDropPath`` is not defined under that name in this
    file (the class was renamed) — confirm against upstream.
    """

    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        # hidden_act may be a string key into ACTaFN or a callable already.
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def _snake_case ( self , hidden_states ):
        """Apply the two-layer channel MLP with activation and dropout."""
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class _UpperCAmelCase ( nn.Module):
    """One PoolFormer block: pooling token-mixer + channel MLP, each preceded
    by GroupNorm and wrapped in DropPath, with optional learnable layer scale.

    Fixes vs the obfuscated original: the six ``lowercase_`` parameters (a
    SyntaxError) are restored from the keyword arguments used at the call site
    in the encoder (``num_channels= , pool_size= , hidden_size= ,
    intermediate_size= , drop_path=`` plus the positional config); the two
    layer-scale parameters (collapsed into one name) become
    ``layer_scale_1``/``layer_scale_2``.
    NOTE(review): ``PoolFormerPooling`` / ``PoolFormerOutput`` /
    ``PoolFormerGroupNorm`` / ``PoolFormerDropPath`` are not defined under
    those names in this file (classes were renamed) — confirm against upstream.
    """

    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Stochastic depth regularization; identity when the rate is zero.
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Per-channel learnable scales on both residual branches.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )

    def _snake_case ( self , hidden_states ):
        """Run token mixing then the channel MLP; returns a 1-tuple (output,)."""
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class _UpperCAmelCase ( nn.Module):
    """PoolFormer encoder: alternating patch-embedding (downsampling) stages
    and stacks of PoolFormer blocks.

    Fixes vs the obfuscated original: forward's duplicated ``lowercase_``
    parameters (a SyntaxError) are restored to ``pixel_values,
    output_hidden_states, return_dict``; attribute names ``patch_embeddings``
    and ``block`` were already read by the original forward.
    NOTE(review): ``PoolFormerEmbeddings`` / ``PoolFormerLayer`` are not
    defined under those names in this file (classes were renamed), and the
    file's forward methods are named ``_snake_case`` rather than ``forward``,
    so ``Module.__call__`` dispatch will fail at runtime — confirm upstream.
    """

    def __init__( self , config ):
        super().__init__()
        self.config = config
        # Stochastic-depth decay rule: drop-path rate grows linearly per layer.
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # One patch-embedding (downsampling) stage per encoder block.
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks: depths[i] PoolFormer layers per stage.
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )

    def _snake_case ( self , pixel_values , output_hidden_states=False , return_dict=True ):
        """Run all stages; optionally collect the hidden state after each stage."""
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class _UpperCAmelCase ( PreTrainedModel):
    """Abstract base handling weight initialization and the pretrained-model
    loading interface for PoolFormer.

    Fixes vs the obfuscated original: the base class ``lowerCAmelCase__`` was
    undefined (``PreTrainedModel`` is what this file imports); the four class
    attributes all shared the name ``_lowerCAmelCase`` (so only the last
    survived) and are restored to the names the HF machinery reads; the two
    methods shared the name ``_snake_case`` (so weight init was dead code) and
    are restored to ``_init_weights`` / ``_set_gradient_checkpointing``; the
    second method's duplicated ``lowercase_`` parameters (a SyntaxError)
    become ``module, value=False``; ``nn.Convad`` -> ``nn.Conv2d``.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        """Initialize conv/linear weights normally; LayerNorm to (0, 1)."""
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        # Toggle gradient checkpointing on the encoder submodule.
        # NOTE(review): the original isinstance target was obfuscated away; in
        # upstream code this checks the encoder class — confirm the name.
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
# Docstring fragment describing the model class; consumed by
# `add_start_docstrings` on the model classes below.
# NOTE(review): both constants below are bound to the same obfuscated name
# `lowercase__`, so the second assignment clobbers the first — verify against
# the upstream POOLFORMER_START_DOCSTRING / POOLFORMER_INPUTS_DOCSTRING pair.
lowercase__ : Optional[int] = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
# Docstring fragment describing the forward() inputs.
lowercase__ : str = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , lowerCAmelCase__ , )
class _UpperCAmelCase ( lowerCAmelCase__):
    """Bare PoolFormer model: config + encoder, no task-specific head."""

    def __init__( self : Optional[Any] , lowercase_ : int ):
        super().__init__(lowercase_ )
        # NOTE(review): these two assignments bind a local `snake_case_`, not
        # `self.config` / `self.encoder`, yet forward() reads both attributes;
        # `config` is also not bound here (the parameter is `lowercase_`) —
        # verify against the upstream PoolFormerModel.__init__.
        snake_case_ : Optional[Any] = config
        snake_case_ : Tuple = PoolFormerEncoder(lowercase_ )
        # Initialize weights and apply final processing
        self.post_init()

    def _snake_case ( self : Optional[Any] ):
        # Returns the patch-embedding module. NOTE(review): `self.embeddings`
        # is never assigned in this class — confirm the attribute name upstream.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _snake_case ( self : int , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
        # NOTE(review): duplicate parameter names (a SyntaxError); originally
        # (pixel_values, output_hidden_states, return_dict).
        snake_case_ : List[str] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        # Run the encoder; element 0 is the last hidden state.
        snake_case_ : str = self.encoder(
            lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
        snake_case_ : Union[str, Any] = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
class _UpperCAmelCase ( nn.Module):
    """Simple pooler head: one square linear projection (hidden_size -> hidden_size).

    Fixes two obfuscation defects: ``__init__`` bound the linear layer to a
    throwaway local instead of ``self.dense`` (so the module registered no
    parameters and the forward crashed), and the forward returned an undefined
    name instead of the projected tensor.
    """

    def __init__( self : Union[str, Any] , lowercase_ : int ):
        super().__init__()
        # Register the projection on the module so its parameters are tracked.
        self.dense = nn.Linear(lowercase_.hidden_size , lowercase_.hidden_size )

    def _snake_case ( self : Any , lowercase_ : Union[str, Any] ):
        """Project the input hidden state through the dense layer and return it."""
        output = self.dense(lowercase_ )
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , lowerCAmelCase__ , )
class _UpperCAmelCase ( lowerCAmelCase__):
    """PoolFormer with a linear classification head over the pooled final features."""

    def __init__( self : Dict , lowercase_ : Optional[Any] ):
        super().__init__(lowercase_ )
        # NOTE(review): all four assignments below bind a local `snake_case_`
        # rather than self.num_labels / self.poolformer / self.norm /
        # self.classifier, which forward() reads; `config` is also unbound (the
        # parameter is `lowercase_`) — verify upstream.
        snake_case_ : Optional[Any] = config.num_labels
        snake_case_ : Optional[int] = PoolFormerModel(lowercase_ )
        # Final norm
        snake_case_ : Optional[Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        snake_case_ : Optional[int] = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _snake_case ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
        # NOTE(review): duplicate parameter names (a SyntaxError); originally
        # (pixel_values, labels, output_hidden_states, return_dict).
        snake_case_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case_ : Union[str, Any] = self.poolformer(
            lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
        snake_case_ : List[str] = outputs[0]
        # Pool spatially (mean over the last two axes) after the final group
        # norm, then classify.
        snake_case_ : Union[str, Any] = self.classifier(self.norm(lowercase_ ).mean([-2, -1] ) )
        snake_case_ : int = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    snake_case_ : Tuple = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    snake_case_ : Union[str, Any] = '''single_label_classification'''
                else:
                    snake_case_ : Optional[int] = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                snake_case_ : Union[str, Any] = MSELoss()
                if self.num_labels == 1:
                    snake_case_ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    snake_case_ : List[str] = loss_fct(lowercase_ , lowercase_ )
            elif self.config.problem_type == "single_label_classification":
                snake_case_ : str = CrossEntropyLoss()
                snake_case_ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                snake_case_ : Any = BCEWithLogitsLoss()
                snake_case_ : List[Any] = loss_fct(lowercase_ , lowercase_ )
        if not return_dict:
            snake_case_ : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 155 |
"""simple docstring"""
import os
def __lowercase ( _a ):
    """Return the greatest product of four adjacent cells in *_a*, taken along
    a column, a row, or either diagonal (Project Euler 11).

    As in the upstream solution, the combined scan is only correct for square
    n x n grids.

    Fixes the obfuscation defect where every local shared one name, so the
    vertical/horizontal/diagonal products clobbered each other and the grid
    itself.
    """
    grid = _a
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    # The diagonal products deliberately keep their last value when the
    # boundary conditions below fail, exactly as upstream.
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def __lowercase ( ):
    """Read grid.txt next to this file and return the largest product of four
    adjacent cells.

    NOTE(review): obfuscation broke this block — `grid` is appended to before
    being bound (the list is assigned to `snake_case_`), `_a` is undefined in
    `os.path.dirname(_a)` (originally `__file__`), and `largest_product` does
    not exist under that name in this file; verify against the upstream
    Project Euler 11 solution.
    """
    snake_case_ : Tuple = []
    with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    # Convert each row of string cells to ints.
    snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )]
    return largest_product(_a )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file
    # (the reader above is obfuscated to `__lowercase`).
    print(solution())
| 155 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowercase = logging.getLogger(__name__)
def snake_case__ ( ) -> argparse.Namespace:
    """Parse the command-line options for TFRecord shard preparation.

    Returns the parsed ``argparse.Namespace``.

    Fixes the obfuscation defect where the undefined name ``_A`` was passed as
    the ``type=`` / ``default=`` of several options; the concrete types and
    defaults are restored below.
    """
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" , type=str , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
    parser.add_argument(
        """--dataset_config""" , type=str , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" , type=str , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
    parser.add_argument(
        """--shard_size""" , type=int , default=1000 , help="""Number of entries to go in a single shard.""" , )
    parser.add_argument("""--split""" , type=str , default="""train""" , choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" , default=None , type=int , help="""Limit the number of shards (used for debugging).""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" , )
    parser.add_argument(
        """--output_dir""" , default="""tf-tpu""" , type=str , help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
    return args
def snake_case__ ( _A: Any ) -> Union[str, Any]:
    """Return a ``datasets.map``-compatible closure that tokenizes the ``text``
    column of a batch with *_A* (the tokenizer).

    Fixes the obfuscation defect where the inner function read the undefined
    name ``tokenizer`` and the parameter went unused.
    """
    def fn(examples ):
        # _A is the tokenizer callable supplied by the caller.
        return _A(examples["""text"""] )

    return fn
def snake_case__ ( _A: Tuple ) -> Dict:
    """Serialize a batch of tokenized examples into ``tf.train.Example`` byte
    strings.

    *_A* is a dict with parallel ``input_ids`` and ``attention_mask`` lists;
    the return value is a list of serialized protobuf byte strings, one per
    example.

    Fixes the obfuscation defects: the body read ``tokenized_data`` instead of
    the ``_A`` parameter and never bound ``records``, and the garbled
    ``intaa_list`` / ``IntaaList`` attributes are restored to the real
    ``int64_list`` / ``Int64List`` names of the tf.train API.
    """
    records = []
    for i in range(len(_A["""input_ids"""] ) ):
        # One Feature per column; tf.train.Int64List wraps the raw id list.
        feature = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=_A["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=_A["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def snake_case__ ( _A: Dict ) -> Any:
    """End-to-end driver: load the dataset, tokenize it, regroup into
    fixed-length chunks, and write TFRecord shards plus a record-count file.

    NOTE(review): obfuscation broke this block — the parameter `_A` is unused
    while the body reads `args`, and every `lowerCAmelCase =` binding discards
    the name the following lines read (`dataset`, `tokenizer`, `split_dir`,
    `grouped_dataset`, ...); verify against the upstream
    prepare_tfrecord_shards.py example.
    """
    lowerCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        lowerCAmelCase = min(len(_A ) , args.limit )
        lowerCAmelCase = dataset.select(range(_A ) )
        print(f"Limiting the dataset to {args.limit} entries." )
    lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        lowerCAmelCase = os.path.join(args.output_dir , args.split )
        if not os.path.exists(_A ):
            os.makedirs(_A )
    else:
        lowerCAmelCase = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    lowerCAmelCase = tokenize_function(_A )
    lowerCAmelCase = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=["""text"""] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(_A: Dict ):
        # Concatenate all texts.
        lowerCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
        lowerCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        lowerCAmelCase = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        lowerCAmelCase = {
            k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    lowerCAmelCase = dataset_tokenized.map(_A , batched=_A , batch_size=1000 , num_proc=4 )
    lowerCAmelCase = 0
    lowerCAmelCase = 0
    # Write one TFRecord file per `args.shard_size` examples.
    for shard in range(0 , len(_A ) , args.shard_size ):
        lowerCAmelCase = grouped_dataset[shard : shard + args.shard_size]
        lowerCAmelCase = len(dataset_snapshot["""input_ids"""] )
        lowerCAmelCase = os.path.join(_A , f"dataset-{shard_count}-{records_containing}.tfrecord" )
        lowerCAmelCase = get_serialized_examples(_A )
        with tf.io.TFRecordWriter(_A ) as out_file:
            for i in range(len(_A ) ):
                lowerCAmelCase = serialized_examples[i]
                out_file.write(_A )
            print("""Wrote file {} containing {} records""".format(_A , _A ) )
        shard_count += 1
        total_records += records_containing
    # Record the total number of examples written for this split.
    with open(f"split-{args.split}-records-count.txt" , """w""" ) as f:
        print(f"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
    # NOTE(review): `parse_args` and `main` are not defined under these names
    # in this file (the functions above are all obfuscated to `snake_case__`),
    # the result is bound to `__lowercase`, and `main(args)` reads an unbound
    # `args` — verify the entry point against the upstream script.
    __lowercase = parse_args()
    main(args)
| 272 | '''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
    """Project Euler 86: return the least M such that the number of cuboids
    with an integer shortest surface path and sides a <= b <= c <= M exceeds
    *_A*.

    Fixes the obfuscation defect where all three counters shared one local
    name, clobbering each other.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= _A:
        max_cuboid_size += 1
        # For c = max_cuboid_size, s = a + b runs over 2 .. 2c; the unfolded
        # shortest path across the surfaces has length sqrt(s**2 + c**2).
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                # Count the (a, b) splits of s with 1 <= a <= b <= c.
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file
    # (the function above is obfuscated to `snake_case__`); with the default
    # limit of 1_000_000 this computes Project Euler 86.
    print(f'{solution() = }')
| 272 | 1 |
from __future__ import annotations
class _snake_case :
    """A binary-tree node: a value plus optional left/right children.

    Fixes the obfuscation defect where the constructor bound locals named
    ``a`` instead of instance attributes, leaving nodes without data or
    children.
    """

    def __init__(self , _lowerCamelCase ):
        self.data = _lowerCamelCase
        # Children are wired up by the caller after construction.
        self.left = None  # type: _snake_case | None
        self.right = None  # type: _snake_case | None
def __lowerCamelCase ( UpperCAmelCase_ : Node | None ):  # In Order traversal of the tree
    """Print the values of the tree rooted at *UpperCAmelCase_* in order, one
    per line.

    Fixes the obfuscation defect where the body read the undefined names
    ``display`` and ``tree`` instead of recursing on the parameter.
    """
    if UpperCAmelCase_:
        __lowerCamelCase(UpperCAmelCase_.left )
        print(UpperCAmelCase_.data )
        __lowerCamelCase(UpperCAmelCase_.right )
def __lowerCamelCase ( UpperCAmelCase_ : Node | None ):
    """Return the depth (number of nodes on the longest root-to-leaf path) of
    the tree rooted at *UpperCAmelCase_*; 0 for an empty tree.

    Fixes the obfuscation defect where the body recursed via the undefined
    names ``depth_of_tree`` and ``tree`` instead of the parameter.
    """
    return 1 + max(__lowerCamelCase(UpperCAmelCase_.left ) , __lowerCamelCase(UpperCAmelCase_.right ) ) if UpperCAmelCase_ else 0
def __lowerCamelCase ( UpperCAmelCase_ : Node ):
    """Return True iff every node of the tree rooted at *UpperCAmelCase_* has
    either zero or two children (an empty tree is full).

    Fixes the obfuscation defect where the body tested the undefined names
    ``tree`` / ``is_full_binary_tree`` instead of the parameter.
    """
    if not UpperCAmelCase_:
        return True
    if UpperCAmelCase_.left and UpperCAmelCase_.right:
        return __lowerCamelCase(UpperCAmelCase_.left ) and __lowerCamelCase(UpperCAmelCase_.right )
    else:
        return not UpperCAmelCase_.left and not UpperCAmelCase_.right
def __lowerCamelCase ( ):  # Main function for testing.
    """Build a small demo tree and exercise the helpers above.

    NOTE(review): obfuscation broke this block — every node is bound to the
    same local `a` (the tree is never linked together), and `Node`,
    `is_full_binary_tree`, `depth_of_tree` and `display` do not exist under
    those names in this file; verify against the upstream binary-tree example.
    """
    a :Any = Node(1 )
    a :Tuple = Node(2 )
    a :Optional[Any] = Node(3 )
    a :int = Node(4 )
    a :Union[str, Any] = Node(5 )
    a :Dict = Node(6 )
    a :List[Any] = Node(7 )
    a :Dict = Node(8 )
    a :List[str] = Node(9 )
    print(is_full_binary_tree(UpperCAmelCase_ ) )
    print(depth_of_tree(UpperCAmelCase_ ) )
    print('''Tree is: ''' )
    display(UpperCAmelCase_ )


if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file.
    main()
| 356 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table for the VAN model.
# NOTE(review): obfuscation broke this module — the modeling symbol list below
# is bound to the same name `snake_case` as the table (clobbering it, instead
# of `_import_structure["modeling_van"] = [...]`), and the `_import_structure`
# passed to _LazyModule at the bottom is never defined; verify against the
# upstream transformers van __init__.
snake_case : Any = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}

# Modeling classes are only importable when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case : Union[str, Any] = [
        '''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VanForImageClassification''',
        '''VanModel''',
        '''VanPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    # At runtime, defer all imports to the lazy module proxy.
    import sys

    snake_case : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 281 | 0 |
# Digit-parity tables used by the reversible-number search below.
# Fixes the obfuscation defect where both lists were bound to the same name
# `UpperCamelCase` (clobbering the even list) while the functions below read
# ODD_DIGITS / EVEN_DIGITS.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
# Preserve the original module attribute (its final value was the odd list).
UpperCamelCase = ODD_DIGITS
def lowercase_ ( remaining_length: int , remainder: int , digits: list[int] , length: int ) -> int:
    """Count the *length*-digit reversible numbers reachable by filling the
    outermost *remaining_length* free digit positions (Project Euler 145
    helper). A number n is reversible when every digit of n + reverse(n) is
    odd.

    NOTE(review): the original signature reused one obfuscated name for all
    four parameters (a SyntaxError); the names here follow the variables the
    body already used. The lost ``digits[...]`` index assignments are restored
    from the upstream solution.
    """
    # Local parity tables so the function is self-contained.
    EVEN_DIGITS = [0, 2, 4, 6, 8]
    ODD_DIGITS = [1, 3, 5, 7, 9]
    if remaining_length == 0:
        # A reversible number may not start or end with 0.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, carrying inwards.
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (always even), so
        # the incoming carry must be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += lowercase_(0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must have opposite parity so the column sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += lowercase_(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length )
    return result
def lowercase_ ( _lowerCamelCase : int = 9):
    """Sum the reversible-number counts for every digit length up to
    *_lowerCamelCase* (Project Euler 145).

    NOTE(review): obfuscation broke this block — it rebinds `lowercase_`
    (shadowing the recursive counter above), then calls the undefined
    `reversible_numbers`, reads the unbound `max_power`, and accumulates into
    `result` while the initial 0 was bound to `lowercase__`; verify against
    the upstream solution.
    """
    lowercase__ : Tuple = 0
    for length in range(1 , max_power + 1):
        result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase)
    return result


if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file.
    print(f"{solution() = }")
| 87 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Any = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Dict = TaTokenizerFast
lowerCAmelCase : Optional[int] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 13 | 0 |
"""simple docstring"""
from PIL import Image
def lowerCAmelCase( __lowerCamelCase ):
    """Binarise a grayscale PIL-style image in place around its mean intensity
    and return it: pixels strictly above the mean become 255, the rest 0.

    Fixes the obfuscation defects: the body read undefined names (`image`,
    `_A`, `width`, `height`, `pixels`) instead of the parameter, and the
    thresholded value was bound to a throwaway local instead of written back.

    NOTE(review): PIL's ``size`` is (width, height); the unpack order below
    follows the upstream mean_threshold and is only consistent for square
    images — confirm before relying on non-square input.
    """
    height, width = __lowerCamelCase.size
    mean = 0
    pixels = __lowerCamelCase.load()
    # First pass: accumulate the global mean intensity.
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold every pixel against the mean, in place.
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return __lowerCamelCase
if __name__ == "__main__":
    # NOTE(review): `mean_threshold` is not defined under that name in this
    # file (the function above is obfuscated to `lowerCAmelCase`), and the
    # save call reads `image` while the result is bound to `lowerCamelCase_`.
    lowerCamelCase_ : Any = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 358 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( UpperCamelCase__ , unittest.TestCase ):
    """Fast CPU pipeline tests for ConsistencyModelPipeline built on tiny hub
    checkpoints.

    NOTE(review): obfuscation collapsed all class attributes to `a__` and all
    method names to `_lowercase` (later bindings shadow earlier ones), one
    signature duplicates a parameter name (a SyntaxError), and the bodies read
    names (`unet`, `class_cond`, `scheduler`, `components`, `generator`,
    `inputs`, `pipe`, `image`, ...) that the preceding `UpperCAmelCase_ =`
    assignments do not bind — verify everything against the upstream diffusers
    test module.
    """

    a__ : Any = ConsistencyModelPipeline
    a__ : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    a__ : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    a__ : List[Any] = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    @property
    def _lowercase (self : int ):
        # Tiny unconditional UNet fixture pulled from the hub.
        UpperCAmelCase_ = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test" , subfolder="test_unet" , )
        return unet

    @property
    def _lowercase (self : List[Any] ):
        # Tiny class-conditional UNet fixture.
        UpperCAmelCase_ = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
        return unet

    def _lowercase (self : List[str] , __a : Tuple=False ):
        # Assemble the pipeline components dict (unet + CM multistep scheduler).
        if class_cond:
            UpperCAmelCase_ = self.dummy_cond_unet
        else:
            UpperCAmelCase_ = self.dummy_uncond_unet

        # Default to CM multistep sampler
        UpperCAmelCase_ = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )

        UpperCAmelCase_ = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def _lowercase (self : Any , __a : List[str] , __a : Tuple=0 ):
        # Deterministic generator plus the default call kwargs.
        if str(__a ).startswith("mps" ):
            UpperCAmelCase_ = torch.manual_seed(__a )
        else:
            UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
        UpperCAmelCase_ = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def _lowercase (self : Union[str, Any] ):
        # Multistep sampling, unconditional tiny model.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_dummy_inputs(__a )
        UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 32, 32, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowercase (self : Optional[int] ):
        # Multistep sampling, class-conditional tiny model.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.get_dummy_components(class_cond=__a )
        UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_dummy_inputs(__a )
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 32, 32, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowercase (self : str ):
        # One-step sampling, unconditional tiny model.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_dummy_inputs(__a )
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = None
        UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 32, 32, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowercase (self : Optional[int] ):
        # One-step sampling, class-conditional tiny model.
        UpperCAmelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase_ = self.get_dummy_components(class_cond=__a )
        UpperCAmelCase_ = ConsistencyModelPipeline(**__a )
        UpperCAmelCase_ = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_dummy_inputs(__a )
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = None
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 32, 32, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Slow GPU integration tests against the full consistency-models
    checkpoints (multistep, one-step, and fp16 flash-attention variants).

    NOTE(review): same obfuscation breakage as the class above — all methods
    are named `_lowercase` (mutually shadowing), several signatures duplicate
    the parameter name `__a` (a SyntaxError), and bodies read names the
    `UpperCAmelCase_ =` assignments do not bind — verify against the upstream
    diffusers test module.
    """

    def _lowercase (self : Any ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase (self : Optional[Any] , __a : str=0 , __a : str=False , __a : List[str]="cpu" , __a : str=torch.floataa , __a : int=(1, 3, 64, 64) ):
        # Default call kwargs, optionally with pre-generated fixed latents.
        UpperCAmelCase_ = torch.manual_seed(__a )
        UpperCAmelCase_ = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            UpperCAmelCase_ = self.get_fixed_latents(seed=__a , device=__a , dtype=__a , shape=__a )
            UpperCAmelCase_ = latents
        return inputs

    def _lowercase (self : Any , __a : List[Any]=0 , __a : Tuple="cpu" , __a : Optional[int]=torch.floataa , __a : str=(1, 3, 64, 64) ):
        # Deterministic latents on a fixed device/dtype/shape.
        if type(__a ) == str:
            UpperCAmelCase_ = torch.device(__a )
        UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(__a )
        UpperCAmelCase_ = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
        return latents

    def _lowercase (self : str ):
        # Multistep sampling on the full imagenet64 L2 checkpoint.
        UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        UpperCAmelCase_ = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
        pipe.to(torch_device=__a )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_inputs()
        UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 64, 64, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def _lowercase (self : Union[str, Any] ):
        # One-step sampling on the full checkpoint.
        UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        UpperCAmelCase_ = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
        pipe.to(torch_device=__a )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_inputs()
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = None
        UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 64, 64, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    @require_torch_a
    def _lowercase (self : int ):
        # Multistep sampling under fp16 with torch 2.0 flash attention.
        UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        UpperCAmelCase_ = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
        pipe.to(torch_device=__a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_inputs(get_fixed_latents=__a , device=__a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ):
            UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 64, 64, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    @require_torch_a
    def _lowercase (self : Any ):
        # One-step sampling under fp16 with torch 2.0 flash attention.
        UpperCAmelCase_ = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
        UpperCAmelCase_ = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
        UpperCAmelCase_ = ConsistencyModelPipeline(unet=__a , scheduler=__a )
        pipe.to(torch_device=__a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=__a )

        UpperCAmelCase_ = self.get_inputs(get_fixed_latents=__a , device=__a )
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ):
            UpperCAmelCase_ = pipe(**__a ).images
        assert image.shape == (1, 64, 64, 3)

        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        UpperCAmelCase_ = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 1 | '''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route all DEBUG-and-above log records to stdout so subprocess output is
# captured by the test harness.
# NOTE(review): obfuscation collapsed both assignments onto
# `SCREAMING_SNAKE_CASE_` while the last line reads the original names
# `logger` / `stream_handler`, which are unbound here.
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger()
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __A ( UpperCamelCase__ ):
    """End-to-end tests for the RAG fine-tuning script.

    Builds a tiny synthetic source/target dataset, launches
    ``finetune_rag.py`` in a subprocess with a full CLI argument set, and
    asserts a minimum exact-match score from the ``metrics.json`` the run
    produces.

    NOTE(review): an obfuscation pass collapsed every local onto
    ``UpperCAmelCase_`` and every argument onto ``__a``; bodies below read
    the original names (``contents``, ``n_lines``, ``data_dir``,
    ``testargs``, ...), which are unbound as written. Intent is annotated
    in comments.
    """
    def _lowercase (self : Optional[Any] , __a : str ):
        # Create dummy {train,val,test}.{source,target} files, each holding
        # the same line repeated n_lines[split] times.
        os.makedirs(__a , exist_ok=__a )
        UpperCAmelCase_ = {"source": "What is love ?", "target": "life"}
        UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f:
                    f.write(__a )
    def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ):
        # Run finetune_rag.py in a subprocess and return the parsed
        # metrics.json. Second parameter is presumably the distributed
        # retriever backend ("pytorch" or "ray") — TODO confirm.
        UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
        UpperCAmelCase_ = os.path.join(__a , "output" )
        UpperCAmelCase_ = os.path.join(__a , "data" )
        self._create_dummy_data(data_dir=__a )
        # CLI arguments for the fine-tuning script; kept deliberately tiny
        # (dummy dataset, 1 epoch, batch size 2) so the test stays fast.
        UpperCAmelCase_ = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()
        if gpus > 0:
            testargs.append(f"""--gpus={gpus}""" )
            if is_apex_available():
                testargs.append("--fp16" )  # mixed precision only when apex is installed
        else:
            # CPU fallback: two DDP-CPU processes instead of GPUs.
            testargs.append("--gpus=0" )
            testargs.append("--distributed_backend=ddp_cpu" )
            testargs.append("--num_processes=2" )
        UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(__a , env=self.get_env() )
        # The script writes its evaluation results to metrics.json.
        UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
        with open(__a ) as f:
            UpperCAmelCase_ = json.load(__a )
        return result
    @require_torch_gpu
    def _lowercase (self : Optional[int] ):
        # Single-GPU run with the default (pytorch) retriever.
        UpperCAmelCase_ = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    def _lowercase (self : Dict ):
        # Multi-GPU run with the default (pytorch) retriever.
        UpperCAmelCase_ = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_gpu
    @require_ray
    def _lowercase (self : Optional[int] ):
        # Single-GPU run with the Ray distributed retriever.
        UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
    @require_torch_multi_gpu
    @require_ray
    def _lowercase (self : Any ):
        # Multi-GPU Ray-retriever variant.
        # NOTE(review): passes gpus=1 despite the multi-gpu decorator —
        # looks like it should be gpus=2; confirm against upstream.
        UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 1 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL.Image only when vision extras are installed; otherwise install
# a no-op stub so module import (and test collection) still succeeds.
if is_vision_available():
    from PIL import Image
else:
    class __magic_name__ :
        # Stub standing in for the PIL Image namespace; the obfuscated
        # static method presumably replaces `Image.open` — TODO confirm.
        @staticmethod
        def __a ( *_a , **_a ) -> Tuple:
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ (unittest.TestCase ):
    """Tests for the ``object-detection`` pipeline.

    Covers the generic pipeline contract (output schema, batching), a tiny
    random-weight DETR checkpoint for fast runs, and slow integration tests
    against ``facebook/detr-resnet-50`` and a LayoutLMv3 token-detection
    model.

    NOTE(review): obfuscated — several signatures repeat the parameter name
    ``_a`` (a SyntaxError as written) and locals are collapsed onto
    ``lowerCAmelCase_``; intended roles are annotated in comments.
    """
    lowerCamelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def __a ( self , _a , _a , _a ) -> Union[str, Any]:
        # Pipeline factory for the common pipeline test mixin: build an
        # ObjectDetectionPipeline and return it with one sample image path.
        lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , image_processor=_a )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def __a ( self , _a , _a ) -> Optional[Any]:
        # Generic contract test: every detection is a dict with float score,
        # string label and an int xmin/ymin/xmax/ymax box; threshold=0.0 so
        # at least one detection is always returned.
        lowerCAmelCase_ = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
        self.assertGreater(len(_a ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                _a , {
                    "score": ANY(_a ),
                    "label": ANY(_a ),
                    "box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )},
                } , )
        import datasets
        # Batch over mixed input kinds: PIL image, URL, and dataset files in
        # RGBA / LA / L modes to exercise image-mode conversion.
        lowerCAmelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        lowerCAmelCase_ = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        lowerCAmelCase_ = object_detector(_a , threshold=0.0 )
        self.assertEqual(len(_a ) , len(_a ) )
        for outputs in batch_outputs:
            self.assertGreater(len(_a ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    _a , {
                        "score": ANY(_a ),
                        "label": ANY(_a ),
                        "box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )},
                    } , )
    @require_tf
    @unittest.skip("Object detection not implemented in TF" )
    def __a ( self ) -> Union[str, Any]:
        pass
    @require_torch
    def __a ( self ) -> Union[str, Any]:
        # Fast test with a tiny random-weight DETR; exact values pinned at
        # threshold=0.0 for single-image and batched calls.
        lowerCAmelCase_ = "hf-internal-testing/tiny-detr-mobilenetsv3"
        lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_a )
        lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_a )
        lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
        lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ] , )
        lowerCAmelCase_ = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] , threshold=0.0 , )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                [
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ] , )
    @require_torch
    @slow
    def __a ( self ) -> int:
        # Integration test: real DETR-ResNet-50 built manually from
        # model + feature extractor; pins the well-known cat/remote/couch
        # detections on the COCO sample image.
        lowerCAmelCase_ = "facebook/detr-resnet-50"
        lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_a )
        lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_a )
        lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
        lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] , )
        lowerCAmelCase_ = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ] , )
    @require_torch
    @slow
    def __a ( self ) -> Tuple:
        # Same DETR integration check, but built through the high-level
        # pipeline() factory instead of manual components.
        lowerCAmelCase_ = "facebook/detr-resnet-50"
        lowerCAmelCase_ = pipeline("object-detection" , model=_a )
        lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] , )
        lowerCAmelCase_ = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ] )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ] , )
    @require_torch
    @slow
    def __a ( self ) -> List[str]:
        # Threshold filtering: at 0.9985 only the two cat detections survive.
        lowerCAmelCase_ = 0.9_9_8_5
        lowerCAmelCase_ = "facebook/detr-resnet-50"
        lowerCAmelCase_ = pipeline("object-detection" , model=_a )
        lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_a )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ] , )
    @require_torch
    @require_pytesseract
    @slow
    def __a ( self ) -> Tuple:
        # LayoutLMv3 document-understanding model exposed through the
        # object-detection pipeline (requires pytesseract for OCR boxes).
        lowerCAmelCase_ = "Narsil/layoutlmv3-finetuned-funsd"
        lowerCAmelCase_ = 0.9_9_9_3
        lowerCAmelCase_ = pipeline("object-detection" , model=_a , threshold=_a )
        lowerCAmelCase_ = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
        self.assertEqual(
            nested_simplify(_a , decimals=4 ) , [
                {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ] , )
| 22 |
import logging
from transformers import PretrainedConfig
# Module logger and the checkpoint-name -> config-URL map for BertAbs.
# NOTE(review): obfuscation collapsed both assignments onto the same name
# `lowerCamelCase__`, so the second overwrites the first.
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class __magic_name__ (PretrainedConfig ):
    """Configuration for the BertAbs abstractive-summarization model.

    Stores the encoder/decoder hyper-parameters; defaults mirror the
    ``bertabs-finetuned-cnndm`` checkpoint.

    Fixes vs. the original block:
    * the base class was the undefined name ``__lowercase``; this module
      imports ``PretrainedConfig`` (L13443), the evident intended parent;
    * every ``__init__`` parameter was named ``_a`` — duplicate parameter
      names are a SyntaxError — while the body read the real names;
    * values were bound to a throwaway local instead of attributes on
      ``self``.
    """

    lowerCamelCase__ = '''bertabs'''

    def __init__(
        self ,
        vocab_size=30522 ,
        max_pos=512 ,
        enc_layers=6 ,
        enc_hidden_size=512 ,
        enc_heads=8 ,
        enc_ff_size=512 ,
        enc_dropout=0.2 ,
        dec_layers=6 ,
        dec_hidden_size=768 ,
        dec_heads=8 ,
        dec_ff_size=2048 ,
        dec_dropout=0.2 ,
        **kwargs ,
    ) -> None:
        # Forward any extra keyword arguments to PretrainedConfig.
        super().__init__(**kwargs )
        # Shared embedding / positional settings.
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        # Encoder stack.
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # Decoder stack.
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 22 | 1 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and checkpoint-name -> config-URL map for Autoformer.
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
    '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class a_ ( _lowerCAmelCase ):
    """Configuration for the Autoformer time-series forecasting model.

    Holds the series-level settings (prediction/context lengths, lags,
    static/dynamic feature counts), the encoder-decoder transformer sizes,
    and the Autoformer-specific decomposition parameters (label length,
    moving average, autocorrelation factor).
    """
    # Presumably the obfuscated `model_type` and `attribute_map` of
    # PretrainedConfig — TODO confirm.
    __A = "autoformer"
    __A = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__( self : Any , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "student_t" , lowercase : str = "nll" , lowercase : int = 1 , lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase : bool = True , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = 0 , lowercase : Optional[List[int]] = None , lowercase : Optional[List[int]] = None , lowercase : int = 64 , lowercase : int = 2 , lowercase : int = 2 , lowercase : int = 2 , lowercase : int = 2 , lowercase : int = 32 , lowercase : int = 32 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 100 , lowercase : float = 0.02 , lowercase : bool = True , lowercase : int=True , lowercase : int = 10 , lowercase : int = 25 , lowercase : int = 3 , **lowercase : Dict , ):
        """Build the config; see the Autoformer docs for each field.

        NOTE(review): the obfuscation renamed every parameter to
        `lowercase`, so the body reads the original parameter names
        (prediction_length, context_length, ...), which are unbound as
        written.
        """
        # Time-series settings.
        lowercase_ :str = prediction_length
        # Context defaults to the prediction length when not given.
        lowercase_ :Dict = context_length if context_length is not None else prediction_length
        lowercase_ :Any = distribution_output
        lowercase_ :Tuple = loss
        lowercase_ :Dict = input_size
        lowercase_ :Tuple = num_time_features
        lowercase_ :int = lags_sequence
        lowercase_ :Tuple = scaling
        lowercase_ :List[Any] = num_dynamic_real_features
        lowercase_ :Union[str, Any] = num_static_real_features
        lowercase_ :str = num_static_categorical_features
        # Cardinality must pair 1:1 with the static categorical features.
        if cardinality is not None and num_static_categorical_features > 0:
            if len(lowercase ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            lowercase_ :Optional[int] = cardinality
        else:
            lowercase_ :Optional[int] = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(lowercase ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            lowercase_ :Tuple = embedding_dimension
        else:
            # Default embedding size heuristic: min(50, (cardinality+1)//2).
            lowercase_ :Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowercase_ :Any = num_parallel_samples
        # Transformer architecture configuration
        # Model input = lagged values of each series plus the extra features.
        lowercase_ :Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
        lowercase_ :Union[str, Any] = d_model
        lowercase_ :Optional[Any] = encoder_attention_heads
        lowercase_ :Optional[Any] = decoder_attention_heads
        lowercase_ :Optional[int] = encoder_ffn_dim
        lowercase_ :int = decoder_ffn_dim
        lowercase_ :Any = encoder_layers
        lowercase_ :Optional[Any] = decoder_layers
        lowercase_ :Dict = dropout
        lowercase_ :Dict = attention_dropout
        lowercase_ :str = activation_dropout
        lowercase_ :int = encoder_layerdrop
        lowercase_ :Dict = decoder_layerdrop
        lowercase_ :List[str] = activation_function
        lowercase_ :int = init_std
        lowercase_ :Optional[int] = use_cache
        # Autoformer
        lowercase_ :List[str] = label_length
        lowercase_ :List[str] = moving_average
        lowercase_ :Optional[int] = autocorrelation_factor
        super().__init__(is_encoder_decoder=lowercase , **lowercase )
    @property
    def lowercase__ ( self : int ):
        """Number of per-timestep features appended to the lagged values.

        NOTE(review): `__init__` reads `self._number_of_features`, but this
        property is named `lowercase__` here — another obfuscation artifact.
        """
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 223 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard transformers lazy-import module for NLLB-MoE: the import
# structure maps submodule -> exported names, torch-only entries are added
# behind an availability guard, and the module is replaced by a _LazyModule
# at the bottom.
# NOTE(review): the dict/list below were bound to the obfuscated name
# `lowerCAmelCase` instead of `_import_structure`, which the final
# _LazyModule call reads; also `NllbMoeTop2Router` in the structure vs the
# imported `NllbMoeTopaRouter` look like the same mangled symbol — confirm
# against upstream.
lowerCAmelCase : Optional[Any] ={
    '''configuration_nllb_moe''': [
        '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''NllbMoeConfig''',
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: expose only the configuration symbols
else:
    lowerCAmelCase : Tuple =[
        '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NllbMoeForConditionalGeneration''',
        '''NllbMoeModel''',
        '''NllbMoePreTrainedModel''',
        '''NllbMoeTop2Router''',
        '''NllbMoeSparseMLP''',
    ]
if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTopaRouter,
        )
else:
    # At runtime, defer all imports until an attribute is first accessed.
    import sys
    lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 223 | 1 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
# RFC 3526 MODP Diffie-Hellman groups, keyed by the RFC group id.
# Each entry holds the group prime (parsed from its hexadecimal
# representation) and the generator, which is 2 for every MODP group.
a :List[Any] = {
    # 1536-bit
    5: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 2048-bit
    14: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 3072-bit
    15: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 4096-bit
    16: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
            + "FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 6144-bit
    17: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
            + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
            + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
            + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
            + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
            + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
            + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
            + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
            + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
            + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
            + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
            + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
            + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
            + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
            + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
            + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
            + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
            + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
            + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
            + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
            + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
            + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
            + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
            + "6DCC4024FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 8192-bit
    18: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
            + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
            + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
            + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
            + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
            + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
            + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
            + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
            + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
            + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
            + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
            + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
            + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
            + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
            + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
            + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
            + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
            + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
            + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
            + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
            + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
}
class __a :
    """Finite-field Diffie-Hellman key exchange over the RFC 3526 MODP groups.

    NOTE(review): this block is corrupted by an obfuscation pass — every
    method is named ``_a`` (so later defs overwrite earlier ones), several
    signatures repeat the parameter name ``_a`` (a SyntaxError as written),
    and bodies reference the undefined names ``group`` / ``__lowercase`` /
    ``self.__private_key``. The comments below describe the evidently
    intended behaviour; the code needs restoration against upstream.
    """
    def __init__( self , _a = 14 ) -> None:
        """Pick the MODP group (default: 2048-bit group 14) and draw a
        random 256-bit private key."""
        if group not in primes:
            raise ValueError("""Unsupported Group""" )
        SCREAMING_SNAKE_CASE__ : List[Any] = primes[group]['''prime''']
        SCREAMING_SNAKE_CASE__ : Optional[Any] = primes[group]['''generator''']
        # 32 random bytes -> int: the private exponent.
        SCREAMING_SNAKE_CASE__ : Any = int(hexlify(urandom(32 ) ) , base=16 )
    def _a ( self ) -> str:
        """Return the private key as a hex string (without the 0x prefix)."""
        return hex(self.__private_key )[2:]
    def _a ( self ) -> str:
        """Generate the public key g^x mod p and return it as hex."""
        SCREAMING_SNAKE_CASE__ : str = pow(self.generator , self.__private_key , self.prime )
        return hex(__lowercase )[2:]
    def _a ( self , _a ) -> bool:
        """Validate a remote public key: range check plus a Legendre-symbol
        test that the key lies in the prime-order subgroup."""
        return (
            2 <= key <= self.prime - 2
            and pow(__lowercase , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def _a ( self , _a ) -> str:
        """Derive the shared secret from a remote hex public key and return
        its SHA-256 digest as hex."""
        SCREAMING_SNAKE_CASE__ : Dict = int(__lowercase , base=16 )
        if not self.is_valid_public_key(__lowercase ):
            raise ValueError("""Invalid public key""" )
        SCREAMING_SNAKE_CASE__ : str = pow(__lowercase , self.__private_key , self.prime )
        return shaaaa(str(__lowercase ).encode() ).hexdigest()
    @staticmethod
    def _a ( _a , _a ) -> bool:
        # Static variant of the public-key validity check, taking the prime
        # explicitly. NOTE(review): compares against a name suffixed `_str`
        # although the caller passes an int — obfuscation artifact.
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(__lowercase , (prime - 1) // 2 , __lowercase ) == 1
        )
    @staticmethod
    def _a ( _a , _a , _a = 14 ) -> str:
        """Derive the shared secret from hex-encoded local private and
        remote public keys without instantiating the class."""
        SCREAMING_SNAKE_CASE__ : str = int(__lowercase , base=16 )
        SCREAMING_SNAKE_CASE__ : Dict = int(__lowercase , base=16 )
        SCREAMING_SNAKE_CASE__ : int = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(__lowercase , __lowercase ):
            raise ValueError("""Invalid public key""" )
        SCREAMING_SNAKE_CASE__ : Any = pow(__lowercase , __lowercase , __lowercase )
        return shaaaa(str(__lowercase ).encode() ).hexdigest()
# Run any embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 366 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : int = {
"""^""": 3,
"""*""": 2,
"""/""": 2,
"""%""": 2,
"""+""": 1,
"""-""": 1,
} # Priority of each operator
SCREAMING_SNAKE_CASE__ : List[Any] = len(__lowerCAmelCase ) if (len(__lowerCAmelCase ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(__lowerCAmelCase ) , """Postfix""".center(__lowerCAmelCase ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__lowerCAmelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__lowerCAmelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__lowerCAmelCase ) == 0:
stack.append(__lowerCAmelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__lowerCAmelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__lowerCAmelCase ) # push x to stack
print(
x.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format
while len(__lowerCAmelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format
return "".join(__lowerCAmelCase ) # return Postfix as str
def _lowercase ( __lowerCAmelCase ) -> str:
    """Convert an infix expression to prefix via reverse / postfix / reverse.

    Reverses the input, swaps the parentheses, converts the result to
    postfix with ``infix_2_postfix`` (defined elsewhere in the original
    module), and reverses that to obtain the prefix form.

    Fixes vs. the original block: the loop read the undefined name ``infix``
    and "swapped" parentheses by rebinding a throwaway local, so the swap
    never reached the list and the call raised NameError at runtime.
    """
    reversed_chars = list(__lowerCAmelCase[::-1])  # reverse the infix equation
    for i in range(len(reversed_chars)):
        if reversed_chars[i] == "(":
            reversed_chars[i] = ")"  # change "(" to ")"
        elif reversed_chars[i] == ")":
            reversed_chars[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(reversed_chars)))[
        ::-1
    ]  # call infix_2_postfix on reversed Infix, return reverse of Postfix
# Interactive entry point: read an infix expression and print its prefix
# form. NOTE(review): both inputs are bound to the obfuscated name `a`
# while the last line reads `Infix`, which is unbound as written; also
# `infix_2_prefix` is not defined under that name in this file.
if __name__ == "__main__":
    a :Optional[int] = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    a :Dict = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 56 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module logger; verbosity raised to INFO for the conversion script.
A_ :str = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy the weights of an old-structure ProphetNet checkpoint into the new
    HuggingFace model layout and save the converted model.

    Args:
        prophetnet_checkpoint_path: path of the old-format checkpoint; an
            ``"xprophetnet"`` substring in the path selects the XLM variant.
        pytorch_dump_folder_path: directory the converted model is saved to.

    Raises:
        ValueError: if a missing key cannot be mapped onto the old model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # projections that the old model packs into a single in_proj_weight matrix
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: stay on the same container)
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # fall back to the new name when the old model lacks the mapped one
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
                elif hasattr(old_model, attribute):
                    old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # old model stores q/k/v stacked in one matrix; slice out the third we need
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these two checks were bare expressions (missing `assert`) and silently did nothing
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # no terminal match yet: descend one level on both sides
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Original bound the parser/namespace to `A_` but then read `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 71 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

# All four tables below were bound to one recycled name (`_UpperCAmelCase`) while the
# tokenizer class reads VOCAB_FILES_NAMES / PRETRAINED_* — restore the real names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# NOTE(review): the "aresolve" and "tokenizer.jsont" URLs below look like upstream
# typos; preserved byte-for-byte here — verify against the hub before fixing.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class lowerCAmelCase(PreTrainedTokenizerFast):
    r"""
    A "fast" REALM tokenizer (backed by the HuggingFace *tokenizers* library),
    based on WordPiece. The original block had duplicate ``UpperCAmelCase``
    parameter names (a SyntaxError), four methods all named ``A_`` (so only the
    last survived), and an undefined base class — restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of candidate texts (optionally with ``text_pair``),
        always padding to max length so the result can be stacked into
        (batch_size, num_candidates, sequence_length) tensors.
        """
        # always pad so every candidate has the same length
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            # encode each candidate without tensor conversion; stack at the end
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """[CLS] A [SEP] — or, for a pair: [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None) -> List[int]:
        """0s over the first sequence (incl. specials), 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files to `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 50 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid 1 / (1 + e^-z); accepts scalars or numpy arrays.

    The original parameter was mangled while the body read ``z`` (NameError);
    renamed from the recycled ``SCREAMING_SNAKE_CASE__`` so callers resolve.
    """
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Mean binary cross-entropy between predicted probabilities `h` and labels `y`.

    Parameters were mangled while the body read ``h`` and ``y``; restored.
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    """Log-likelihood of labels `y` for features `x` under logistic weights.

    sum(y * (x·w) - log(1 + e^(x·w))). Parameters/locals were mangled; restored.
    """
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    Args:
        alpha: learning rate.
        x: (n_samples, n_features) feature matrix.
        y: (n_samples,) binary labels.
        max_iterations: number of gradient steps.

    Returns:
        The learned weight vector `theta` (n_features,).

    Locals were all bound to one recycled name while the body read the real
    names (`theta`, `h`, `gradient`, `j`); restored.
    """
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Original bound every value to `__A` but then read `iris`, `x`, `y`,
    # `alpha`, `theta` — restored the real names.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
def predict_prob(x):
    """Predicted probability for features `x` under the fitted weights.

    NOTE(review): relies on the module-level `theta` computed in the
    ``__main__`` block above — only valid when run as a script.
    """
    return sigmoid_function(
        np.dot(x, theta)
    )  # predicting the value of probability from the logistic regression algorithm
# Plot the two classes and the fitted 0.5-probability decision boundary.
# The original bound every intermediate to `__A` but read `xa_min`, `xxa`,
# `grid`, `probs` — restored with distinct names per axis.
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__A)
class UpperCAmelCase(PretrainedConfig):
    """RAG composite configuration: wraps a question-encoder config and a
    generator config. The original had duplicate ``_UpperCAmelCase`` parameter
    names (a SyntaxError) and both methods named ``_snake_case`` — restored.
    """

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # imported lazily to avoid a circular import with the auto-config registry
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        # inherit the generator's forced EOS token when none was given
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate a RAG config from the two sub-model configs."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this config, recursing into the two sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 2 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Decorator: tag the wrapped function as the handler for a single `key`.

    The original bound the tag list to a recycled name and read `handle`/`key`
    (NameError), and all three decorators here were named `lowercase` so only
    the last survived — restored the distinct name.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys: List[str]):
    """Decorator: tag the wrapped function as the handler for each key in `keys`.

    Restored from a shadowed `lowercase` definition whose body read
    `handle`/`keys` that were never bound.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects methods tagged with `mark`/`mark_multiple` into a
    per-class `key_handler` dict and attaches a generic `handle_input`
    dispatcher. Named `KeyHandler` because `register` below constructs it by
    that name; the original class/base names were mangled and undefined.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key; dispatch to the registered handler, else return None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild `cls` through the KeyHandler metaclass so its marked methods
    become key handlers. Restored from the third shadowed `lowercase` def."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 33 |
from ...processing_utils import ProcessorMixin
class UpperCamelCase_(ProcessorMixin):
    """SpeechT5 processor: wraps a SpeechT5 feature extractor and tokenizer into
    a single processor. The original base class and every local were mangled
    (locals bound to `A__` but read by real names), and the `pad`/`batch_decode`/
    `decode` methods shared one recycled name — restored.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process `audio`/`text` inputs and/or `audio_target`/`text_target`
        labels; targets are attached to the inputs as `labels` (plus
        `decoder_attention_mask` when available)."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_values`/`input_ids` and/or `labels` to uniform length,
        attaching padded labels (and `decoder_attention_mask`) to the inputs."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily report the mel-bin count as the feature size so the
                # extractor pads spectrogram targets with the right width
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
| 14 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over `result` and write them (plus, optionally, every
    prediction/target pair) to text files.

    The original had duplicate mangled parameter names (a SyntaxError) and
    read `result`/`args` that were never bound — restored; renamed so the
    call in `main` resolves.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lower-case `text`, strip the punctuation ignored during training, and
    collapse newline/space runs to single spaces. Parameter and locals were
    mangled while the body read `text`; renamed so `main` resolves it.
    """
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", " ", " "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    """Run ASR inference over a dataset split and log WER/CER.

    Locals were all bound to a recycled name while the body read the real
    names; renamed to `main` so the `__main__` call resolves.
    """
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # Original bound the parser/namespace to `a` but then read `parser`/`args`.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 366 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
def lowerCAmelCase_ (state_dict ):
    """Rename original GLPN checkpoint keys to the HF GLPN naming scheme.

    Returns a new ``OrderedDict``; the input mapping is not modified.

    NOTE(review): the degraded body referenced ``state_dict``, ``idx`` and
    ``new_state_dict`` while every local had been collapsed to one mangled
    name, so the function raised ``NameError``; the locals (and the renamed
    parameter) are restored here.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx)-1}' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx)-1}' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'block{idx}' , F'block.{int(idx)-1}' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx)-1}' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def lowerCAmelCase_ (state_dict , config ):
    """Split each fused key/value ("kv") projection of the GLPN encoder into
    separate ``key`` and ``value`` entries, mutating ``state_dict`` in place.

    NOTE(review): the degraded body popped the fused tensors into mangled
    names, then sliced the undefined names ``kv_weight``/``kv_bias`` and never
    wrote the slices back; both are restored here.
    """
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict:
            # the first hidden_sizes[i] rows are the key projection, the rest the value projection
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def lowerCAmelCase_ ():
    """Download and return the standard COCO test image (two cats) used to
    smoke-test vision models.

    NOTE(review): the degraded body passed an undefined name as the ``stream``
    argument; the original passes ``stream=True``.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def lowerCAmelCase_ (checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """Convert an original GLPN checkpoint (.pth) to a HF ``GLPNForDepthEstimation``
    model, verify its output on a reference image, and optionally push it to the hub.

    NOTE(review): the degraded signature declared duplicate parameter names
    (a SyntaxError) and the body used one mangled name for every local; the
    original parameter list and locals are restored. The calls to
    ``prepare_img``/``rename_keys``/``read_in_k_v`` match the original script —
    in this file those helpers' own names were mangled and must be restored too.
    """
    # GLPN-base hyper-parameters shared by the released NYU and KITTI checkpoints
    config = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output against reference slices taken from the released checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F'Unknown model name: {model_name}' )
        expected_shape = torch.Size([1, 4_8_0, 6_4_0] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    # CLI for the GLPN conversion function above.
    # NOTE(review): the parser is bound to the mangled name `a`, yet the lines
    # below call `parser.add_argument`, read `args`, and call
    # `convert_glpn_checkpoint` — none of which exist under those names in this
    # file; confirm against the original conversion script.
    a : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path',
        default=None,
        type=str,
        help='Path to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    parser.add_argument(
        '--model_name',
        default='glpn-kitti',
        type=str,
        help='Name of the model in case you\'re pushing to the hub.',
    )
    a : List[Any] = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 82 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
A__ = iter(UpperCamelCase__ )
while True:
A__ = tuple(itertools.islice(UpperCamelCase__ , UpperCamelCase__ ) )
if not chunk:
return
yield chunk
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
A__ = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
A__ = """"""
if len(UpperCamelCase__ ) < 2:
return dirty
for i in range(len(UpperCamelCase__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCamelCase__ ) & 1:
clean += "X"
return clean
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
A__ = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
A__ = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCamelCase__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCamelCase__ )
return table
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
A__ = generate_table(UpperCamelCase__ )
A__ = prepare_input(UpperCamelCase__ )
A__ = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
A__ = divmod(table.index(UpperCamelCase__ ) , 5 )
A__ = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
A__ = generate_table(UpperCamelCase__ )
A__ = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
A__ = divmod(table.index(UpperCamelCase__ ) , 5 )
A__ = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 247 |
"""simple docstring"""
_snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}]
_snake_case = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCamelCase__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
    '''PyTorch-Lightning module that fine-tunes a Transformer on a GLUE
    sequence-classification task (wraps ``BaseTransformer`` from
    ``lightning_base``).

    NOTE(review): throughout this class the identifiers were mangled — method
    bodies reference ``_lowercase``, ``hparams``, ``loss``, ``parser`` etc.
    that do not match the declared parameter names (``__a``), so most methods
    raise NameError as written. Restore against the original
    ``examples/legacy/pytorch-lightning/run_glue.py`` before use.
    '''

    SCREAMING_SNAKE_CASE__ :Any = "sequence-classification"
    def __init__( self : str , __a : int ) -> Dict:
        # accept either a plain dict or an argparse Namespace of hyper-parameters
        if type(_lowercase ) == dict:
            _UpperCamelCase : List[str] = Namespace(**_lowercase )
        _UpperCamelCase : Optional[Any] = glue_output_modes[hparams.task]
        _UpperCamelCase : int = glue_tasks_num_labels[hparams.task]
        super().__init__(_lowercase , _lowercase , self.mode )
    def __SCREAMING_SNAKE_CASE ( self : Tuple , **__a : List[str] ) -> int:
        # forward() delegates straight to the wrapped HF model
        return self.model(**_lowercase )
    def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int , __a : Any ) -> Dict:
        # training step: build model inputs from the collated batch
        _UpperCamelCase : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        # distilbert/bart take no token_type_ids; others may
        if self.config.model_type not in ["distilbert", "bart"]:
            _UpperCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        _UpperCamelCase : Dict = self(**_lowercase )
        _UpperCamelCase : str = outputs[0]
        # log the current learning rate next to the loss
        _UpperCamelCase : Any = self.trainer.lr_schedulers[0]["scheduler"]
        _UpperCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
        # prepare_data: cache GLUE features for the train and dev splits
        _UpperCamelCase : Optional[Any] = self.hparams
        _UpperCamelCase : Optional[Any] = processors[args.task]()
        _UpperCamelCase : int = processor.get_labels()
        for mode in ["train", "dev"]:
            _UpperCamelCase : Optional[Any] = self._feature_file(_lowercase )
            if os.path.exists(_lowercase ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , _lowercase )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                _UpperCamelCase : List[Any] = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir )
                )
                _UpperCamelCase : Optional[int] = convert_examples_to_features(
                    _lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("Saving features into cached file %s" , _lowercase )
                torch.save(_lowercase , _lowercase )
    def __SCREAMING_SNAKE_CASE ( self : int , __a : str , __a : int , __a : bool = False ) -> int:
        # get_dataloader: load cached features and wrap them in a TensorDataset
        _UpperCamelCase : int = "dev" if mode == "test" else mode
        _UpperCamelCase : Dict = self._feature_file(_lowercase )
        logger.info("Loading features from cached file %s" , _lowercase )
        _UpperCamelCase : Union[str, Any] = torch.load(_lowercase )
        _UpperCamelCase : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        _UpperCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        _UpperCamelCase : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        # labels are long for classification tasks, float for regression (STS-B)
        if self.hparams.glue_output_mode == "classification":
            _UpperCamelCase : Dict = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            _UpperCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(_lowercase , _lowercase , _lowercase , _lowercase ) , batch_size=_lowercase , shuffle=_lowercase , )
    def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : Any ) -> List[str]:
        # validation step: forward pass, collect loss, logits and gold labels
        _UpperCamelCase : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            _UpperCamelCase : Dict = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        _UpperCamelCase : Any = self(**_lowercase )
        _UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs[:2]
        _UpperCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
        _UpperCamelCase : Dict = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def __SCREAMING_SNAKE_CASE ( self : str , __a : int ) -> Optional[Any]:
        # _eval_end: aggregate per-batch outputs and compute GLUE metrics
        _UpperCamelCase : Any = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
        _UpperCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            _UpperCamelCase : int = np.argmax(_lowercase , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            _UpperCamelCase : Any = np.squeeze(_lowercase )
        _UpperCamelCase : List[str] = np.concatenate([x["target"] for x in outputs] , axis=0 )
        _UpperCamelCase : str = [[] for _ in range(out_label_ids.shape[0] )]
        _UpperCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
        _UpperCamelCase : Tuple = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , _lowercase , _lowercase )}
        _UpperCamelCase : Union[str, Any] = dict(results.items() )
        _UpperCamelCase : Dict = results
        return ret, preds_list, out_label_list
    def __SCREAMING_SNAKE_CASE ( self : int , __a : list ) -> int:
        # validation_epoch_end
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = self._eval_end(_lowercase )
        _UpperCamelCase : List[Any] = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]:
        # test_epoch_end
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : int = self._eval_end(_lowercase )
        _UpperCamelCase : Union[str, Any] = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def __SCREAMING_SNAKE_CASE ( __a : Union[str, Any] , __a : Dict ) -> Union[str, Any]:
        # add_model_specific_args: extend the base parser with GLUE options
        BaseTransformer.add_model_specific_args(_lowercase , _lowercase )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=_lowercase , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--task" , default="" , type=_lowercase , required=_lowercase , help="The GLUE task to run" , )
        parser.add_argument(
            "--gpus" , default=0 , type=_lowercase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
def lowercase__ ( ) -> Dict:
    """Entry point: build the argument parser, train the GLUE model, and
    optionally run test predictions on the newest checkpoint."""
    # NOTE(review): results below are bound to the mangled `_UpperCamelCase`
    # while later lines read `snake_case_`, `parser`, `args`, `model`,
    # `trainer`, `checkpoints` — undefined as written. Also `GLUETransformer`
    # does not exist in this file (the class above was renamed) and the
    # `__main__` guard calls an undefined `main`. Restore against the original
    # run_glue.py.
    _UpperCamelCase : str = argparse.ArgumentParser()
    add_generic_args(snake_case_ ,os.getcwd() )
    _UpperCamelCase : Dict = GLUETransformer.add_model_specific_args(snake_case_ ,os.getcwd() )
    _UpperCamelCase : Optional[int] = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        _UpperCamelCase : Optional[int] = os.path.join(
            "./results" ,F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' ,)
        os.makedirs(args.output_dir )
    _UpperCamelCase : Dict = GLUETransformer(snake_case_ )
    _UpperCamelCase : Optional[int] = generic_train(snake_case_ ,snake_case_ )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        _UpperCamelCase : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=snake_case_ ) )
        _UpperCamelCase : List[Any] = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(snake_case_ )
if __name__ == "__main__":
    main()
| 370 |
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 | 0 |
'''simple docstring'''
import functools
from typing import Any
def lowerCamelCase_ ( string: str, words: list ) -> bool:
    """Return True when ``string`` can be segmented into a sequence of entries
    from ``words`` (words may be reused), using a trie plus memoised DP.

    Raises:
        ValueError: if ``string`` is not a non-empty str, or ``words`` is not
            a list of non-empty strings.

    NOTE(review): the degraded signature declared two parameters with the same
    mangled name (a SyntaxError) and the body referenced undefined locals;
    both are restored here.
    """
    # Validation
    if not isinstance(string, str ) or len(string ) == 0:
        raise ValueError('''the string should be not empty string''' )
    if not isinstance(words, list ) or not all(
        isinstance(item, str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"  # sentinel marking a complete word
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        # True when string[index:] can be segmented into dictionary words
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string ):
            trie_node = trie_node.get(string[i], None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 125 |
import os
import pytest
from attr import dataclass
_UpperCAmelCase : List[str] = "us-east-1" # defaults region
@dataclass
class __lowerCAmelCase :
    # SageMaker integration-test configuration for HF training images.
    # NOTE(review): every field below was collapsed to the same mangled name
    # `_a`, so each class-level assignment shadows the previous one, and the
    # last one references `hyperparameters`, which is undefined at class
    # scope. The original test config declared distinct fields (framework,
    # role arn, hyperparameters, distributed hyperparameters) — confirm
    # against the upstream sagemaker test conftest.
    _a = 42
    _a = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    _a = {
        '''task_name''': '''mnli''',
        '''per_device_train_batch_size''': 16,
        '''per_device_eval_batch_size''': 16,
        '''do_train''': True,
        '''do_eval''': True,
        '''do_predict''': True,
        '''output_dir''': '''/opt/ml/model''',
        '''overwrite_output_dir''': True,
        '''max_steps''': 500,
        '''save_steps''': 5500,
    }
    _a = {**hyperparameters, '''max_steps''': 1000}
    @property
    def SCREAMING_SNAKE_CASE ( self: str ):
        # Metric-scraping regexes SageMaker applies to the training logs;
        # the patterns differ between the pytorch and tensorflow images.
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            # NOTE(review): the "eval_accuracy"/"eval_loss" names look swapped
            # with the regexes below — confirm against the original file.
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]
    @property
    def SCREAMING_SNAKE_CASE ( self: Dict ):
        # Base job name used for the SageMaker training job.
        return F"{self.framework}-transfromers-test"
    @property
    def SCREAMING_SNAKE_CASE ( self: Any ):
        # Local path of the per-framework entry-point scripts.
        return F"./tests/sagemaker/scripts/{self.framework}"
    @property
    def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
        # ECR image URI of the HF DLC to run the test in.
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 236 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( state_dict ):
    """Rename original GLPN checkpoint keys to the HF GLPN naming scheme and
    return a new ``OrderedDict`` (the input mapping is not modified).

    NOTE(review): the degraded body referenced the undefined names
    ``state_dict``/``a_``/``new_state_dict``; the parameter and locals are
    restored here.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder" , "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder" , "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(idx)-1}""" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(idx)-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"""block{idx}""" , f"""block.{int(idx)-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(idx)-1}""" )
        if "bot_conv" in key:
            key = key.replace("bot_conv" , "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1" , "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2" , "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1" , "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2" , "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3" , "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv" , "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth" , "head.head" )
        new_state_dict[key] = value
    return new_state_dict
def a__ ( state_dict , config ):
    """Split each fused key/value ("kv") projection of the GLPN encoder into
    separate ``key`` and ``value`` entries, mutating ``state_dict`` in place.

    NOTE(review): the degraded signature declared duplicate parameter names
    (a SyntaxError) and the body sliced the undefined names
    ``kv_weight``/``kv_bias`` without writing the slices back; restored.
    """
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict:
            # the first hidden_sizes[i] rows belong to the key projection
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
def a__ ( ):
    """Download and return the standard COCO test image (two cats) used to
    smoke-test vision models.

    NOTE(review): the degraded body passed an undefined name as the ``stream``
    argument; the original passes ``stream=True``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def a__ ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """Convert an original GLPN checkpoint (.pth) to a HF ``GLPNForDepthEstimation``
    model, verify its output on a reference image, and optionally push it to the hub.

    NOTE(review): the degraded body used one mangled name (`a_`) for every
    local and argument; the original parameter list and locals are restored.
    The calls to ``prepare_img``/``rename_keys``/``read_in_k_v`` match the
    original script — in this file those helpers' own names were mangled and
    must be restored too.
    """
    # GLPN-base hyper-parameters shared by the released NYU and KITTI checkpoints
    config = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output against reference slices taken from the released checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(f"""Unknown model name: {model_name}""" )
        expected_shape = torch.Size([1, 4_8_0, 6_4_0] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
    # CLI for the GLPN conversion function above.
    # NOTE(review): the parser is bound to the mangled `lowerCAmelCase__`, yet
    # the lines below call `parser.add_argument`, read `args`, and call
    # `convert_glpn_checkpoint` — none of which are defined under those names
    # in this file; confirm against the original conversion script.
    lowerCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''',
        default=None,
        type=str,
        help='''Path to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
    )
    parser.add_argument(
        '''--model_name''',
        default='''glpn-kitti''',
        type=str,
        help='''Name of the model in case you\'re pushing to the hub.''',
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 364 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ = logging.getLogger()
def a__ ( ):
    '''Parse the ``-f`` flag from ``sys.argv`` (pytest/notebook compatibility
    shim) and return its value.

    NOTE(review): the degraded body bound the parser to a mangled name and
    then referenced the undefined names ``parser``/``args``; restored.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def a__ ( output_dir ):
    '''Load and return the dict stored in ``<output_dir>/all_results.json``.

    Raises:
        ValueError: when the results file does not exist.

    NOTE(review): the degraded body joined the json path but then existence-
    checked and opened the *directory* argument itself; the path local is
    restored so the json file is actually read.
    '''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results
def a__ ( ):
    '''Return True when the test run is on CUDA and NVIDIA apex is installed
    (used to decide whether ``--fp16`` can be exercised).

    NOTE(review): the degraded body returned the undefined name
    ``is_using_cuda``; the local is restored. ``torch_device`` and
    ``is_apex_available`` come from the transformers testing utils imported
    at the top of this file.
    '''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Mirror all log records to stdout so the test runner captures them.
# NOTE(review): the handler is bound to the mangled name `lowerCAmelCase__`
# but registered via the undefined name `stream_handler` — confirm against
# the original test module.
lowerCAmelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
    """End-to-end tests for the PyTorch ``*_no_trainer`` example scripts.

    Each test assembles a CLI argument list, launches the example script via
    ``accelerate launch`` and asserts on the metrics written to
    ``all_results.json`` plus the checkpoint / tracking folders it creates.

    NOTE(review): throughout this class locals are assigned to the name
    ``lowerCAmelCase`` but then read under different names (``tmp_dir``,
    ``testargs``, ``result``, ``epochs``), helpers are called with the
    undefined name ``snake_case__``, the set-up method stores into locals
    while later code reads ``cls.tmpdir`` / ``cls.configPath`` /
    ``cls._launch_args``, and every test method shares the name
    ``lowercase__`` (so only the last definition survives on the class).
    As written these reads raise NameError/AttributeError — confirm the
    intended identifiers against the original file.
    """
    @classmethod
    def lowercase__ ( cls ):
        """Create a shared tmp dir and write a default accelerate config into it."""
        lowerCAmelCase : Any = tempfile.mkdtemp()
        lowerCAmelCase : List[Any] = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        lowerCAmelCase : Optional[int] = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def lowercase__ ( cls ):
        """Remove the shared tmp dir created at class set-up."""
        shutil.rmtree(cls.tmpdir )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_glue_no_trainer: eval accuracy >= 0.75; epoch checkpoint and tracking dir exist."""
        lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : int = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        lowerCAmelCase : Dict = get_results(snake_case__ )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "glue_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_clm_no_trainer: perplexity < 100; skipped on multi-GPU (too few batches)."""
        lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : Tuple = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        lowerCAmelCase : List[str] = get_results(snake_case__ )
        self.assertLess(result["perplexity"] , 100 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "clm_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_mlm_no_trainer: perplexity < 42; epoch checkpoint and tracking dir exist."""
        lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : Tuple = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : Any = get_results(snake_case__ )
        self.assertLess(result["perplexity"] , 42 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "mlm_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_ner_no_trainer: accuracy >= 0.75 and train loss < 0.5 (7 epochs on multi-GPU, else 2)."""
        lowerCAmelCase : Optional[int] = 7 if get_gpu_count() > 1 else 2
        lowerCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : int = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : str = get_results(snake_case__ )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "ner_no_trainer" ) ) )
    @unittest.skip(reason="Fix me @muellerzr" )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_qa_no_trainer (currently skipped): SQuAD-v2 f1/exact >= 28."""
        lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : List[str] = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : Optional[Any] = get_results(snake_case__ )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 28 )
        self.assertGreaterEqual(result["eval_exact"] , 28 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "qa_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_swag_no_trainer: eval accuracy >= 0.8; tracking dir exists."""
        lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : List[Any] = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : Tuple = get_results(snake_case__ )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "swag_no_trainer" ) ) )
    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_summarization_no_trainer (slow): ROUGE-1 >= 10, ROUGE-2 >= 2, ROUGE-L/Lsum >= 7."""
        lowerCAmelCase : str = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : Tuple = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : str = get_results(snake_case__ )
        self.assertGreaterEqual(result["eval_rouge1"] , 10 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "summarization_no_trainer" ) ) )
    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_translation_no_trainer (slow): BLEU >= 30 on the en->ro sample."""
        lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : Tuple = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : int = get_results(snake_case__ )
        self.assertGreaterEqual(result["eval_bleu"] , 30 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "translation_no_trainer" ) ) )
    @slow
    def lowercase__ ( self ):
        """run_semantic_segmentation_no_trainer (slow): overall accuracy >= 0.10."""
        lowerCAmelCase : Optional[int] = logging.StreamHandler(sys.stdout )
        logger.addHandler(snake_case__ )
        lowerCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : Any = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            """.split()
        run_command(self._launch_args + testargs )
        lowerCAmelCase : int = get_results(snake_case__ )
        self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def lowercase__ ( self ):
        """run_image_classification_no_trainer: accuracy >= 0.6; step checkpoint and tracking dir exist."""
        lowerCAmelCase : str = self.get_auto_remove_tmp_dir()
        lowerCAmelCase : Union[str, Any] = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
            """.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        lowerCAmelCase : Dict = get_results(snake_case__ )
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(snake_case__ , "image_classification_no_trainer" ) ) )
| 133 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for ``LDMTextToImagePipeline``.

    NOTE(review): locals in this class are assigned to `SCREAMING_SNAKE_CASE`
    but read back under different names (`config`, `unet`, `pipe`, `inputs`,
    ...), all class attributes share the name `_lowercase`, and the base
    class `A__` is not defined in this module — as written this raises
    NameError. Confirm the intended identifiers against the original file.
    """
    # Pipeline class under test.
    _lowercase : Optional[int] = LDMTextToImagePipeline
    # Call parameters the pipeline does not support.
    _lowercase : int = TEXT_TO_IMAGE_PARAMS - {
        '''negative_prompt''',
        '''negative_prompt_embeds''',
        '''cross_attention_kwargs''',
        '''prompt_embeds''',
    }
    _lowercase : str = PipelineTesterMixin.required_optional_params - {
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    _lowercase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
    _lowercase : List[str] = False
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        """Build tiny, seeded UNet/scheduler/VAE/CLIP components for a dummy pipeline."""
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        SCREAMING_SNAKE_CASE = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE = CLIPTextModel(a)
        SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        SCREAMING_SNAKE_CASE = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Tuple:
        """Return deterministic call kwargs (seeded generator) for the dummy pipeline."""
        if str(a).startswith('mps'):
            # MPS does not support device-bound generators.
            SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        else:
            SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
        SCREAMING_SNAKE_CASE = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        """Two-step CPU inference matches a stored 3x3 corner slice within 1e-3."""
        SCREAMING_SNAKE_CASE = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        SCREAMING_SNAKE_CASE = self.get_dummy_components()
        SCREAMING_SNAKE_CASE = LDMTextToImagePipeline(**a)
        pipe.to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
        SCREAMING_SNAKE_CASE = pipe(**a).images
        SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        SCREAMING_SNAKE_CASE = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Slow GPU test: full ``CompVis/ldm-text2im-large-256`` checkpoint, 3 steps.

    NOTE(review): locals are assigned to `SCREAMING_SNAKE_CASE` but read back
    under other names (`pipe`, `inputs`, `image`, ...) — as written this
    raises NameError; confirm intended identifiers.
    """
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE__ ( self , a , a=torch.floataa , a=0) -> Optional[int]:
        """Deterministic call kwargs with pre-sampled latents for reproducibility."""
        SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        SCREAMING_SNAKE_CASE = np.random.RandomState(a).standard_normal((1, 4, 32, 32))
        SCREAMING_SNAKE_CASE = torch.from_numpy(a).to(device=a , dtype=a)
        SCREAMING_SNAKE_CASE = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        """3-step inference matches a stored 3x3 corner slice within 1e-3."""
        SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_inputs(a)
        SCREAMING_SNAKE_CASE = pipe(**a).images
        SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        SCREAMING_SNAKE_CASE = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78])
        SCREAMING_SNAKE_CASE = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Nightly GPU test: 50-step inference compared to a stored reference image.

    NOTE(review): locals are assigned to `SCREAMING_SNAKE_CASE` but read back
    under other names (`pipe`, `inputs`, `image`, ...) — as written this
    raises NameError; confirm intended identifiers.
    """
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE__ ( self , a , a=torch.floataa , a=0) -> Tuple:
        """Deterministic call kwargs with pre-sampled latents for reproducibility."""
        SCREAMING_SNAKE_CASE = torch.manual_seed(a)
        SCREAMING_SNAKE_CASE = np.random.RandomState(a).standard_normal((1, 4, 32, 32))
        SCREAMING_SNAKE_CASE = torch.from_numpy(a).to(device=a , dtype=a)
        SCREAMING_SNAKE_CASE = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        """50-step inference matches the stored full reference array within 1e-3."""
        SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(a)
        pipe.set_progress_bar_config(disable=a)
        SCREAMING_SNAKE_CASE = self.get_inputs(a)
        SCREAMING_SNAKE_CASE = pipe(**a).images[0]
        SCREAMING_SNAKE_CASE = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy')
        SCREAMING_SNAKE_CASE = np.abs(expected_image - image).max()
        assert max_diff < 1E-3
| 137 |
def lowerCamelCase__ (_UpperCAmelCase):
    """Sort *_UpperCAmelCase* in ascending order with top-down merge sort.

    Returns the input unchanged for 0/1 elements, otherwise a new sorted list.

    >>> lowerCamelCase__([5, 3, 1, 4, 2])
    [1, 2, 3, 4, 5]
    """
    def merge(left, right) -> list:
        # Merge two sorted lists by repeatedly yielding the smaller head.
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    # BUG FIX: the original stored the midpoint under one name but read the
    # undefined name `mid`, returned the undefined name `collection` in the
    # base case, and recursed through the undefined name `merge_sort`.
    if len(_UpperCAmelCase) <= 1:
        return _UpperCAmelCase
    mid = len(_UpperCAmelCase) // 2
    return merge(lowerCamelCase__(_UpperCAmelCase[:mid]), lowerCamelCase__(_UpperCAmelCase[mid:]))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUG FIX: both results were assigned to the never-read name `a_` while the
    # following lines read the undefined names `user_input`, `unsorted` and
    # `merge_sort` (the sort above is defined as `lowerCamelCase__`).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*lowerCamelCase__(unsorted), sep=',')
| 137 | 1 |
def lowerCAmelCase_ ( __A, array, target ) -> int:
    """Count ordered combinations of items from `array` that sum to `target`.

    Args:
        __A: number of items in `array` (unused by this naive variant).
        array: the usable values (reuse allowed, order matters).
        target: the sum to reach.

    Returns:
        The number of ordered combinations (plain exponential recursion).
    """
    # BUG FIX: the original declared three parameters all named `__A`
    # (a SyntaxError) and passed `__A` instead of `target` into the recursion.
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def lowerCAmelCase_ ( __A, array, target ) -> int:
    """Count ordered combinations of items from `array` summing to `target`,
    memoized top-down with `dp_array` (index = remaining target).

    Args:
        __A: number of items in `array` (unused by this memoized variant).
        array: the usable values (reuse allowed, order matters).
        target: the sum to reach.
    """
    # BUG FIX: the original declared duplicate `__A` parameters (SyntaxError)
    # and the helper's parameters shadowed nothing while its body read the
    # undefined names `target` and `dp_array`.
    def count_of_possible_combinations_with_dp_array(
        target, dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array )
def lowerCAmelCase_ ( __A, array, target ) -> int:
    """Count ordered combinations of items from `array` summing to `target`,
    bottom-up (`dp_array[i]` = number of ways to reach sum `i`).

    Args:
        __A: number of items in `array` (loop bound over its indices).
        array: the usable values (reuse allowed, order matters).
        target: the sum to reach.
    """
    # BUG FIX: the original declared duplicate `__A` parameters (SyntaxError)
    # and its body read the undefined names `dp_array`, `target` and `array`.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: pick nothing
    for i in range(1, target + 1 ):
        for j in range(__A ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUG FIX: all three demo values were assigned to the same module name
    # `UpperCamelCase__` (each overwriting the last) and the print call read
    # the undefined names `combination_sum_iv`, `n`, `array` and `target`
    # (the implementations above are defined as `lowerCAmelCase_`).
    UpperCamelCase__ = 3          # n: number of distinct items
    target = 5
    array = [1, 2, 5]
    print(lowerCAmelCase_(UpperCamelCase__, array, target))
| 143 | import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase_ ( ):
    """Build and parse the CLI arguments for the TFRecord-sharding script.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    # BUG FIX: the parser was assigned to `UpperCAmelCase__` while every
    # `add_argument` call referenced the undefined name `parser`, and the
    # parsed namespace was returned through the undefined name `args`. The
    # garbled `type=__A` / `default=__A` placeholders are restored to the
    # concrete str/int/None values implied by each option's default, and the
    # bogus `-> Union[str, Any]` annotation (Union was never imported) is
    # dropped.
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1_000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )
    args = parser.parse_args()
    return args
def lowerCAmelCase_ ( __A ):
    """Return a closure that tokenizes a batch's "text" column with the
    tokenizer passed as `__A` (for use with ``datasets.Dataset.map``)."""
    # BUG FIX: the inner function took a parameter it never used while its
    # body read the undefined names `tokenizer` and `examples`.
    def fn(examples ):
        return __A(examples["text"] )
    return fn
def lowerCAmelCase_ ( __A ) -> list:
    """Serialize pre-tokenized batches (`__A`, a dict with "input_ids" and
    "attention_mask" lists) into ``tf.train.Example`` byte strings."""
    # BUG FIX: the loop bound and feature values read the undefined name
    # `tokenized_data` instead of the parameter; the Feature field names were
    # garbled (`intaa_list` / `IntaaList` for the real tf.train API names
    # `int64_list` / `Int64List`); and the *parameter* rather than the
    # serialized example was appended to the result list.
    records = []
    for i in range(len(__A["input_ids"] ) ):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=__A["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=__A["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        records.append(example.SerializeToString() )
    return records
def lowerCAmelCase_ ( __A ) -> Union[str, Any]:
    '''Load, tokenize, chunk and shard a dataset into TFRecord files.

    NOTE(review): the parameter is named `__A` but the body reads `args`
    throughout, and every local is assigned to `UpperCAmelCase__` while being
    read back under its original name (`dataset`, `tokenizer`, `split_dir`,
    `grouped_dataset`, `shard_count`, ...) — as written this raises NameError.
    Confirm the intended identifiers against the original script.
    '''
    UpperCAmelCase__ = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
    if args.limit is not None:
        # Optionally truncate the dataset for debugging runs.
        UpperCAmelCase__ = min(len(__A ), args.limit )
        UpperCAmelCase__ = dataset.select(range(__A ) )
        print(f"""Limiting the dataset to {args.limit} entries.""" )
    UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        UpperCAmelCase__ = os.path.join(args.output_dir, args.split )
        if not os.path.exists(__A ):
            os.makedirs(__A )
    else:
        UpperCAmelCase__ = os.path.join(args.output_dir, args.split )
    # Tokenize the whole dataset at once.
    UpperCAmelCase__ = tokenize_function(__A )
    UpperCAmelCase__ = dataset.map(__A, batched=__A, num_proc=4, remove_columns=["text"] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(__A ):
        # Concatenate all texts.
        UpperCAmelCase__ = {k: sum(examples[k], [] ) for k in examples.keys()}
        UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        UpperCAmelCase__ = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        UpperCAmelCase__ = {
            k: [t[i : i + args.max_length] for i in range(0, __A, args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    UpperCAmelCase__ = dataset_tokenized.map(__A, batched=__A, batch_size=1_000, num_proc=4 )
    UpperCAmelCase__ = 0
    UpperCAmelCase__ = 0
    # Walk the grouped dataset in shard_size steps, writing one TFRecord file
    # per shard and tracking totals for the summary file below.
    for shard in range(0, len(__A ), args.shard_size ):
        UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size]
        UpperCAmelCase__ = len(dataset_snapshot["input_ids"] )
        UpperCAmelCase__ = os.path.join(__A, f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        UpperCAmelCase__ = get_serialized_examples(__A )
        with tf.io.TFRecordWriter(__A ) as out_file:
            for i in range(len(__A ) ):
                UpperCAmelCase__ = serialized_examples[i]
                out_file.write(__A )
            print("Wrote file {} containing {} records".format(__A, __A ) )
        shard_count += 1
        total_records += records_containing
    # Persist the record count so downstream training can size its input.
    with open(f"""split-{args.split}-records-count.txt""", "w" ) as f:
        print(f"""Total {args.split} records: {total_records}""", file=__A )
if __name__ == "__main__":
    # NOTE(review): `parse_args` and `main` are not defined under these names
    # in this module (the defs above are all named `lowerCAmelCase_`), and
    # `args` is never bound — as written this guard raises NameError.
    # Confirm the intended function names.
    UpperCamelCase__ = parse_args()
    main(args)
| 143 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a ( _a ):
A = (UniPCMultistepScheduler,)
A = (('''num_inference_steps''', 25),)
def __snake_case (self, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: Dict = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __snake_case (self, SCREAMING_SNAKE_CASE_=0, **SCREAMING_SNAKE_CASE_ ) -> str:
UpperCAmelCase_: Any = dict(self.forward_default_kwargs )
UpperCAmelCase_: Tuple = kwargs.pop("""num_inference_steps""", SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: str = self.dummy_sample
UpperCAmelCase_: Any = 0.1 * sample
UpperCAmelCase_: Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: Optional[int] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
UpperCAmelCase_: int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Dict = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
UpperCAmelCase_: Any = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_: Dict = sample, sample
for t in range(SCREAMING_SNAKE_CASE__, time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase_: Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCAmelCase_: List[str] = new_scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __snake_case (self, SCREAMING_SNAKE_CASE_=0, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Dict = dict(self.forward_default_kwargs )
UpperCAmelCase_: List[str] = kwargs.pop("""num_inference_steps""", SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[Any] = self.dummy_sample
UpperCAmelCase_: int = 0.1 * sample
UpperCAmelCase_: Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: List[str] = self.get_scheduler_config()
UpperCAmelCase_: str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_: Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_: str = scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCAmelCase_: List[str] = new_scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __snake_case (self, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> Any:
if scheduler is None:
UpperCAmelCase_: Tuple = self.scheduler_classes[0]
UpperCAmelCase_: Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: str = self.scheduler_classes[0]
UpperCAmelCase_: str = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: int = 10
UpperCAmelCase_: Any = self.dummy_model()
UpperCAmelCase_: str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_: List[str] = model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: List[str] = scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __snake_case (self ) -> str:
UpperCAmelCase_: Optional[int] = dict(self.forward_default_kwargs )
UpperCAmelCase_: List[str] = kwargs.pop("""num_inference_steps""", SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_: Tuple = self.get_scheduler_config()
UpperCAmelCase_: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: str = self.dummy_sample
UpperCAmelCase_: Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__, """set_timesteps""" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__, """set_timesteps""" ):
UpperCAmelCase_: Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_: Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
UpperCAmelCase_: Dict = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase_: List[str] = scheduler.timesteps[5]
UpperCAmelCase_: List[Any] = scheduler.timesteps[6]
UpperCAmelCase_: Dict = scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ).prev_sample
UpperCAmelCase_: Dict = scheduler.step(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __snake_case (self ) -> List[str]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_: Optional[int] = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase_: str = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
UpperCAmelCase_: Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase_: str = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_: int = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_: Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_: Optional[int] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __snake_case (self ) -> Tuple:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __snake_case (self ) -> Optional[int]:
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__, prediction_type=SCREAMING_SNAKE_CASE__, sample_max_value=SCREAMING_SNAKE_CASE__, solver_order=SCREAMING_SNAKE_CASE__, solver_type=SCREAMING_SNAKE_CASE__, )
def __snake_case (self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __snake_case (self ) -> int:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__, solver_type=SCREAMING_SNAKE_CASE__, prediction_type=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_: Union[str, Any] = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__, solver_type=SCREAMING_SNAKE_CASE__, prediction_type=SCREAMING_SNAKE_CASE__, )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def __snake_case (self ) -> Dict:
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def __snake_case (self ) -> List[str]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__, time_step=0 )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: Tuple = self.full_loop()
UpperCAmelCase_: Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __snake_case (self ) -> Optional[Any]:
    """Full loop under v_prediction must land near its reference mean."""
    sample = self.full_loop(prediction_type="v_prediction")
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_mean.item() - 0.1014) < 1E-3
def __snake_case (self ) -> Dict:
    """fp16 smoke test: a half-precision sample must stay float16 through the loop.

    The original compared against the non-existent ``torch.floataa`` and fed
    undefined placeholders into every call; names are restored.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
    scheduler = scheduler_class(**scheduler_config)
    num_inference_steps = 10
    model = self.dummy_model()
    sample = self.dummy_sample_deter.half()
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    assert sample.dtype == torch.float16
def __snake_case (self, **config ) -> Tuple:
    """When inference steps == training steps every timestep must be unique."""
    for scheduler_class in self.scheduler_classes:
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(scheduler.config.num_train_timesteps)
        assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 147 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
    """Deprecated alias for the Donut image processor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        # The original declared *SCREAMING_SNAKE_CASE__ and **SCREAMING_SNAKE_CASE__
        # (duplicate parameter name -> SyntaxError) and passed the kwargs dict as
        # the warning category; a deprecation warning takes FutureWarning.
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 139 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
def rename_key(key: str) -> str:
    """Turn PyTorch-style indexed names into Flax-style ones: ``layer.0`` -> ``layer_0``.

    The obfuscated original referenced the undefined ``_lowerCAmelCase``/``key``;
    the name is restored to the one the converter below calls.
    """
    pats = re.findall(r"\w+[.]\d+", key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key to its Flax counterpart and reshape where needed.

    The original declared three parameters all named ``A__`` (SyntaxError) and
    never assigned the locals its branches read; names are restored.

    Returns:
        (renamed_key_tuple, possibly-transposed tensor)
    """
    # layer-norm bias that Flax stores as a scale
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        return pt_tuple_key[:-1] + ("embedding",), pt_tensor

    # conv layer: (out, in, kh, kw) -> (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        return renamed_pt_tuple_key, pt_tensor.transpose(2, 3, 1, 0)

    # linear layer: transpose the 2-D weight
    if pt_tuple_key[-1] == "weight":
        return renamed_pt_tuple_key, pt_tensor.T

    # old PyTorch layer norm weight
    if pt_tuple_key[-1] == "gamma":
        return pt_tuple_key[:-1] + ("weight",), pt_tensor

    # old PyTorch layer norm bias
    if pt_tuple_key[-1] == "beta":
        return pt_tuple_key[:-1] + ("bias",), pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax params dict.

    The original declared three parameters named ``A__`` (SyntaxError) and called
    undefined helpers; the intended names are restored.
    """
    # Step 1: convert torch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 370 |
import string
import numpy
def lowerCamelCase__ ( a : int , b : int ) -> int:
    """Greatest common divisor of *a* and *b* (Euclid).

    The original declared two parameters both named ``A__`` (SyntaxError) and
    recursed through the undefined name ``greatest_common_divisor``; an
    iterative form avoids any self-reference (this module-level name is
    rebound later in the file).
    """
    while a:
        a, b = b % a, a
    return b
class lowerCamelCase__:
    """Hill cipher over the 36-symbol alphabet A-Z0-9.

    The obfuscation had collapsed every attribute and method name (all methods
    shared one name, attributes were never assigned); names are restored to the
    ones the method bodies actually reference.
    """

    # This cipher takes alphanumerics into account, i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol to its numeric code."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric code back to its symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (i.e. invertible mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        # inline iterative gcd: the module-level gcd name is shadowed in this file
        a, b = det % req_l, req_l
        while a:
            a, b = b % a, a
        if b != 1:
            raise ValueError(
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )

    def process_text(self, text: str) -> str:
        """Keep only alphabet symbols, pad with the last symbol to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]
        if not chars:  # guard: the original raised IndexError on empty input
            return ""
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt *text* block-by-block with the key matrix."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted += "".join(self.replace_digits(num) for num in batch_encrypted)
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Inverse of the key matrix modulo 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # modular inverse of the determinant by exhaustive search over 0..35
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt *text* block-by-block with the inverse key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted += "".join(self.replace_digits(num) for num in batch_decrypted)
        return decrypted
def main() -> None:
    """Interactive CLI driver for the Hill cipher.

    Restored: the guard below called an undefined ``main`` and the body
    referenced the undefined ``HillCipher``/``A__``; the cipher class is bound
    to ``lowerCamelCase__`` at module level.
    """
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = lowerCamelCase__(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 29 | 0 |
'''simple docstring'''
import math
def __lowercase ( __lowercase ) -> bool:
    """Deterministic primality test using 6k +/- 1 trial division."""
    n = __lowercase
    if 1 < n < 4:
        # 2 and 3 are primes
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def __lowercase ( __lowercase = 0.1 ) -> int:
    """Project Euler 58: smallest spiral side length whose diagonal prime ratio
    drops below the given ratio.

    The original called the undefined name ``is_prime`` (the sibling primality
    test was shadowed by this very definition); a private helper restores it.
    """

    def _is_prime(number: int) -> bool:
        # 6k +/- 1 trial division
        if 1 < number < 4:
            return True
        elif number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(number) + 1), 6):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    ratio = __lowercase
    primes = 3  # 3, 5, 7 sit on the diagonals of the first ring
    j = 3       # current side length of the spiral
    while primes / (2 * j - 1) >= ratio:
        # the three new non-square corners of the next ring
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += _is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 79 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( PipelineTesterMixin, unittest.TestCase ):
    """Fast CPU tests for DDIMPipeline with a tiny UNet.

    Restored from obfuscation: the base class was the undefined name
    ``lowercase`` (PipelineTesterMixin is what the attributes below extend),
    every method shared the single name ``A_``, and one method declared two
    parameters with the same name (SyntaxError).
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # NOTE(review): original flag name was lost in obfuscation — confirm

    def get_dummy_components(self):
        """Tiny deterministic UNet + DDIM scheduler."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        scheduler = DDIMScheduler()
        return {'unet': unet, 'scheduler': scheduler}

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowerCAmelCase__IntegrationTests ( unittest.TestCase ):
    """Slow GPU integration tests against published checkpoints.

    Renamed: the obfuscated class reused the fast-test class's name, shadowing
    it so its tests could never run; both methods also shared one name.
    """

    def test_inference_cifar10(self):
        model_id = 'google/ddpm-cifar10-32'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
'''simple docstring'''
import numpy
class UpperCAmelCase_:
    """Fully connected network input -> 4 -> 3 -> 1 with sigmoid activations,
    trained by plain gradient-descent backpropagation.

    Restored from obfuscation: __init__ assigned locals instead of the
    ``self.*`` attributes every method reads, all methods shared one name, and
    the module-level sigmoid helpers were unreachable — they are provided here
    as static methods so the class is self-contained.
    """

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights; first argument is the node count of the
        # previous layer, second that of the next layer.
        # input layer (input_array.shape[1] nodes) -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; starts as zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    @staticmethod
    def _sigmoid(value):
        # logistic activation
        return 1 / (1 + numpy.exp(-value))

    @staticmethod
    def _sigmoid_derivative(value):
        # derivative expressed in terms of the activation itself
        return value * (1 - value)

    def feedforward(self) -> numpy.ndarray:
        """Forward pass; caches layer activations and returns the output layer."""
        self.layer_between_input_and_first_hidden_layer = self._sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = self._sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = self._sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """One gradient-descent step on all three weight matrices."""
        # error term at the output layer
        output_error = (
            2
            * (self.output_array - self.predicted_output)
            * self._sigmoid_derivative(self.predicted_output)
        )
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T, output_error
        )
        # error propagated back to the second hidden layer
        second_hidden_error = numpy.dot(
            output_error, self.second_hidden_layer_and_output_layer_weights.T
        ) * self._sigmoid_derivative(
            self.layer_between_first_hidden_layer_and_second_hidden_layer
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T, second_hidden_error
        )
        # error propagated back to the first hidden layer
        first_hidden_error = numpy.dot(
            second_hidden_error, self.first_hidden_layer_and_second_hidden_layer_weights.T
        ) * self._sigmoid_derivative(self.layer_between_input_and_first_hidden_layer)
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T, first_hidden_error
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run forward + backward passes for the given number of iterations."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify a single input vector: 1 if the network output exceeds 0.6."""
        self.array = input_arr
        first = self._sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        second = self._sigmoid(
            numpy.dot(first, self.first_hidden_layer_and_second_hidden_layer_weights)
        )
        out = self._sigmoid(
            numpy.dot(second, self.second_hidden_layer_and_output_layer_weights)
        )
        return int(out > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic function 1 / (1 + e^-x).

    Restored: the obfuscated body read ``value`` but the parameter was named
    ``lowerCamelCase`` (NameError), and three module functions collided on the
    single name ``a_``.
    """
    return 1 / (1 + numpy.exp(-value))
def a_ ( lowerCamelCase : numpy.ndarray ):
return (value) * (1 - (value))
def example() -> int:
    """Train the two-hidden-layer network on a small truth table and classify (1, 1, 1).

    Restored: the guard below called an undefined ``example``, the body used the
    non-existent ``numpy.floataa`` dtype, referenced the class by a name not
    defined in this module, and passed an undefined flag for ``give_loss``.
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = UpperCAmelCase_(input_array=test_input, output_array=output)
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 55 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-module wiring. Restored: the structure dict was bound to a throwaway
# name (then clobbered by the model list) while `_LazyModule` was called with
# the undefined `_import_structure`, and the `sys.modules` replacement was lost.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # register the torch models only when torch is importable
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy that imports submodules on attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch checkpoint file.

    Restored: the original declared three parameters all named ``_UpperCAmelCase``
    (SyntaxError) and read undefined names in the body.
    """
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_UpperCamelCase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase: Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 255 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch checkpoint file.

    Restored: the original declared three parameters all named ``UpperCamelCase__``
    (duplicate names, SyntaxError).
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # the original called an undefined name; the converter is defined above
    UpperCamelCase__(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 193 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False) -> int:
    """Read boolean env var *key*; return 1/0, or *default* when unset.

    Restored: the original declared two parameters with the same name
    (SyntaxError) and the flag below referenced this function by an undefined
    name. The classic ``strtobool`` spellings are parsed inline because
    ``distutils`` was removed in Python 3.12.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f'''If set, {key} must be yes or no.''')


# name restored: `slow()` below reads _run_slow_tests
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
# Conditional-skip decorators. Restored: all twenty definitions collided on the
# single name `_lowerCamelCase` (only the last was reachable) and the
# parametrized one declared two parameters with the same name (SyntaxError).
def skip(test_case):
    """Unconditionally skip a test."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Skip unless RUN_SLOW=yes is set in the environment."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    # NOTE(review): original decorator name lost in obfuscation; the 1.12 floor
    # matches accelerate's FSDP requirement — confirm
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Skip unless torch >= *version*; usable bare or parametrized."""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f'''test requires torch version >= {version}''')(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
# name restored: require_trackers below reads this flag
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    """Skip unless at least one tracker is available and comet_ml is absent."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class lowerCamelCase__ ( unittest.TestCase ):
    """TestCase with one scratch directory per class, emptied before each test.

    Restored: the unittest hook names (setUpClass/tearDownClass/setUp) and the
    ``cls.tmpdir`` / ``clear_on_setup`` bindings were lost in obfuscation.
    """

    # when True, the shared directory is wiped at the start of every test
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class lowerCamelCase__ ( unittest.TestCase ):
    """TestCase that resets the accelerator state singletons after each test.

    Restored: the method must be named ``tearDown`` for unittest to call it.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowerCamelCase__ ( unittest.TestCase ):
    """TestCase helper that starts mock patchers and unwinds them on cleanup.

    Restored: the obfuscated method read the undefined ``mocks`` name and
    assigned a throwaway local instead of ``self.mocks``.
    """

    def add_mocks(self, mocks):
        """Start each given patcher (single or sequence) and register its stop as cleanup."""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """True iff *tensor* holds the same values on every distributed process.

    Restored: the obfuscated body assigned throwaway locals and then read the
    undefined names ``state``/``tensors``/``tensor``.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    """Plain record of a finished subprocess: return code plus captured output lines.

    Restored: the constructor declared three identically-named parameters
    (SyntaxError) and never assigned the attributes read by callers; the class
    name is the one ``_stream_subprocess`` below instantiates.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    """Forward each line from an async stream to *callback* until EOF.

    Restored: duplicate parameter names (SyntaxError) and undefined locals;
    renamed to the name ``_stream_subprocess`` below calls.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd*, teeing stdout/stderr line-by-line, and return the collected output."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to `out, err = await p.communicate()`
    # instead; the drawback is that no incremental debug output is seen until done.
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # decode, strip the trailing newline, collect, and optionally echo
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Synchronously run *cmd* through the async streamer and fail loudly on error.

    Returns the ``_RunOutput`` produced by ``_stream_subprocess``.

    Raises:
        RuntimeError: when the command exits with a non-zero return code; the
            message includes the joined command line and the captured stderr.

    NOTE(review): restored from the obfuscated source — the original signature
    repeated one parameter name (a SyntaxError) and shadowed the run_command
    helper below by reusing its name; renamed following the body's own call to
    ``_stream_subprocess``.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    """Raised when a command run through the subprocess helpers fails.

    The message carries the command line and the captured combined output.

    NOTE(review): restored from the obfuscated source — the runner below raises
    ``SubprocessCallException``, and the original base ``__magic_name__`` was
    undefined, so this derives from ``Exception``.
    """

    pass
def _lowerCamelCase(cmd, return_stdout=False):
    """Run *cmd* with ``subprocess.check_output``, merging stderr into stdout.

    Args:
        cmd: argv list to execute.
        return_stdout: when True, return the captured output (decoded to str
            when the result supports ``decode``); otherwise return None.

    Raises:
        SubprocessCallException: when the command exits non-zero; wraps the
            original ``CalledProcessError`` and includes its output.

    NOTE(review): the obfuscated signature repeated one parameter name (a
    SyntaxError); the body's use of ``return_stdout`` fixes the second name.
    """
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(cmd)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 369 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds a TrOCR decoder config plus dummy inputs for the standalone decoder tests.

    NOTE(review): restored from obfuscated names — the test class below
    instantiates ``TrOCRStandaloneDecoderModelTester`` and calls
    ``prepare_config_and_inputs`` / ``create_and_check_decoder_model_past``,
    while the original signatures repeated one parameter name (a SyntaxError)
    and the constructor bound everything to a single local instead of the
    instance attributes the other methods read.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        # the common tests read hidden_size / num_hidden_layers / num_attention_heads
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, attention_mask, lm_labels)`` built from the tester settings."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        """Check that cached (past_key_values) decoding matches the full forward pass."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        # ensure no pad tokens so the cache comparison is well defined
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        # only the new token is fed when the cache is supplied
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the shared ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """unittest entry point for the standalone TrOCR decoder.

    NOTE(review): the obfuscated original listed the same undefined base
    (``__magic_name__``) three times — a TypeError at class creation — and gave
    every method and class attribute one shared name; restored the three mixins
    imported at the top of the file and the attribute/method names the mixins
    and unittest expect.
    """

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        # is_training=False: the standalone decoder is only exercised in eval mode here
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not relevant for a standalone decoder
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 341 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a(snake_case__: Optional[int]):
    """Factory for the `env` CLI subcommand; the parsed-args argument is ignored."""
    command = EnvironmentCommand()
    return command
class EnvironmentCommand(BaseDiffusersCLICommand):
    """`diffusers-cli env`: collect and print environment/version info for bug reports.

    NOTE(review): restored from obfuscated names — the factory above returns
    ``EnvironmentCommand()``, ``BaseDiffusersCLICommand`` is imported at the top
    of the file but was unused, ``run()`` calls ``self.format_dict(...)``, and
    the original's three methods were all named ``_lowercase`` (later defs
    shadowing earlier ones) with duplicated parameter names.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``env`` sub-parser; ``a`` (the factory above) builds the command."""
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=a)

    def run(self) -> dict:
        """Gather version info, print it in issue-template form, and return it."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        """Render the info dict as a bullet list, one `- key: value` per line."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 30 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap ``array[index1]`` and ``array[index2]`` if they violate *direction*.

    direction == 1 requests ascending order, direction == 0 descending.

    NOTE(review): restored from the obfuscated source — the original signature
    repeated one parameter name (a SyntaxError) and compared an element with
    itself (``array[indexa] > array[indexa]``); the merge/sort functions below
    call this helper as ``comp_and_swap``.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge a bitonic sequence of *length* elements starting at *low* into sorted order.

    NOTE(review): renamed from the obfuscated ``a``; the body and the
    ``__main__`` block call it as ``bitonic_merge``, and its own signature
    repeated one parameter name (a SyntaxError).
    """
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        # recursively merge both halves
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic-sort *length* elements of *array* in place, starting at *low*.

    direction == 1 sorts ascending, 0 descending. ``length`` must be a power
    of two for the bitonic network to be correct.

    NOTE(review): renamed from the obfuscated ``a`` to match the call sites in
    its own body and in the ``__main__`` block; the original signature repeated
    one parameter name (a SyntaxError).
    """
    if length > 1:
        middle = int(length / 2)
        # build an ascending first half and a descending second half …
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        # … then merge the resulting bitonic sequence
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    # Demo driver: read comma-separated integers from stdin and show both orders.
    # NOTE(review): the obfuscated original assigned both values to `__a` while
    # the code below read `user_input` / `unsorted`; restored those names.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    # a sorted array is bitonic, so a descending merge re-orders it downward
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
| 30 | 1 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    """A minimal two-cluster self-organizing map (Kohonen network).

    NOTE(review): renamed from the obfuscated ``_snake_case``; ``main()`` below
    instantiates ``SelfOrganizingMap()`` and calls ``get_winner``/``update``.
    """

    def get_winner(self, weights, sample):
        """Pick 0 or 1 by comparing squared Euclidean distances of *sample*
        to ``weights[0]`` and ``weights[1]``.

        Keeps the original comparison direction (``0 if d0 > d1 else 1``).
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The obfuscated source compared a distance with itself (`da > da`);
        # accumulate over every dimension, then compare once.
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move weight vector *j* toward *sample* by learning rate *alpha*; returns *weights*."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """Train the two-cluster SOM on four binary samples, then classify a test sample.

    NOTE(review): renamed from the obfuscated ``lowercase_``; the
    ``if __name__ == "__main__"`` guard below calls ``main()``.
    """
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
# (only executes when this file is run as a script, not on import)
if __name__ == "__main__":
    main()
| 360 | """simple docstring"""
import requests
# Placeholder Giphy API key used as the default for get_gifs; replace before use.
# NOTE(review): renamed from the obfuscated `__A` so the default parameter
# value `giphy_api_key` in the function below resolves.
giphy_api_key = "YOUR API KEY"
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the gif URLs Giphy finds for *query* (performs a network request).

    NOTE(review): renamed from the obfuscated ``lowercase_``; the ``__main__``
    guard below calls ``get_gifs``, and the original signature repeated one
    parameter name (a SyntaxError).
    """
    # Giphy expects '+'-joined terms in the q parameter.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    # Demo: print one gif URL per line for a sample query (needs network access).
    # NOTE(review): stripped the dataset-table residue (`| 64 | 0 |`) that was
    # fused onto this line and made it a SyntaxError.
    print('\n'.join(get_gifs('space ship')))
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class a(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formats Arrow tables as (possibly nested) torch tensors.

    NOTE(review): restored from obfuscated names — every method was called
    ``A_`` (later defs shadowing earlier ones) while the bodies call
    ``self._consolidate`` / ``self._tensorize`` / ``self.recursive_tensorize``
    by their real names; ``torch.intaa`` / ``torch.floataa`` are the digit-
    stripped forms of ``torch.int64`` / ``torch.float32``. The class name ``a``
    is kept as-is since nothing in view references it.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # extra kwargs forwarded to every torch.tensor(...) call
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors; otherwise return it unchanged."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a torch tensor; strings/bytes/None pass through."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table):
        """Extract and tensorize a single row from *pa_table*."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table):
        """Extract, tensorize and consolidate the first column of *pa_table*."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table):
        """Extract and tensorize a batch, consolidating each column independently."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 56 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    # Fast (CPU-sized) tests for StableDiffusionDiffEditPipeline.
    #
    # NOTE(review): obfuscated dataset residue, not runnable as-is:
    #  * the same undefined base `UpperCamelCase` is listed twice (duplicate
    #    bases raise TypeError); the imports suggest PipelineLatentTesterMixin
    #    and PipelineTesterMixin were intended — TODO confirm;
    #  * every class attribute is bound to the same name `a_`, so only the
    #    last assignment survives;
    #  * every method is named `A` (later defs shadow earlier ones) and most
    #    signatures repeat the parameter name `_A`, which is a SyntaxError;
    #  * locals are assigned to `UpperCAmelCase_` but then referenced by their
    #    intended names (`unet`, `components`, `mask`, `latents`, `inputs`,
    #    `pipe`, …), which would be NameErrors.
    # Code kept byte-identical; only comments were added.
    a_ = StableDiffusionDiffEditPipeline
    a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    a_ = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    a_ = frozenset([])

    # presumably get_dummy_components: builds tiny unet/vae/schedulers/CLIP parts
    def A ( self : Tuple ) -> Optional[Any]:
        torch.manual_seed(0 )
        UpperCAmelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
        UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
        torch.manual_seed(0 )
        UpperCAmelCase_ : List[str] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        UpperCAmelCase_ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # NOTE(review): the dict values below (unet, scheduler, …) are never
        # bound above — everything went to `UpperCAmelCase_`.
        UpperCAmelCase_ : Optional[int] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''inverse_scheduler''': inverse_scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    # presumably get_dummy_inputs (prompt + mask + latents)
    def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
        UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Any = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : str = {
            '''prompt''': '''a dog and a newt''',
            '''mask_image''': mask,
            '''image_latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''inpaint_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    # presumably get_dummy_mask_inputs (source/target prompts + RGB image)
    def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
        UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Dict = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : Optional[Any] = {
            '''image''': image,
            '''source_prompt''': '''a cat and a frog''',
            '''target_prompt''': '''a dog and a newt''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''num_maps_per_mask''': 2,
            '''mask_encode_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    # presumably get_dummy_inversion_inputs (decode_latents=True)
    def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
        UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : Optional[int] = {
            '''image''': image,
            '''prompt''': '''a cat and a frog''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''inpaint_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''decode_latents''': True,
            '''output_type''': '''numpy''',
        }
        return inputs

    # presumably test_save_load_optional_components
    def A ( self : List[str] ) -> Optional[Any]:
        if not hasattr(self.pipeline_class , '''_optional_components''' ):
            return
        UpperCAmelCase_ : str = self.get_dummy_components()
        UpperCAmelCase_ : Any = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(_A , _A , _A )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
        UpperCAmelCase_ : str = pipe(**_A )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A )
            UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
        pipe_loaded.to(_A )
        pipe_loaded.set_progress_bar_config(disable=_A )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
        UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
        UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
        UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
        self.assertLess(_A , 1e-4 )

    # presumably test_mask: generate_mask shape/values on CPU
    def A ( self : Tuple ) -> int:
        UpperCAmelCase_ : Optional[Any] = '''cpu'''
        UpperCAmelCase_ : Any = self.get_dummy_components()
        UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
        UpperCAmelCase_ : int = pipe.generate_mask(**_A )
        UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
        UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    # presumably test_inversion with the default DDIM inverse scheduler
    def A ( self : str ) -> Optional[int]:
        UpperCAmelCase_ : Union[str, Any] = '''cpu'''
        UpperCAmelCase_ : str = self.get_dummy_components()
        UpperCAmelCase_ : str = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
        UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
        UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        UpperCAmelCase_ : int = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )

    def A ( self : Tuple ) -> Optional[Any]:
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )

    # presumably test_inversion_dpm: same check with DPMSolver multistep schedulers
    def A ( self : str ) -> Tuple:
        UpperCAmelCase_ : Any = '''cpu'''
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
        UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
        UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
        UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
        UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
        UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
        UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        UpperCAmelCase_ : List[Any] = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )
@require_torch
@slow
class snake_case__ ( unittest.TestCase):
    # Slow, GPU-only integration tests for StableDiffusionDiffEditPipeline on a
    # real SD 2.1 checkpoint (mask generation -> inversion -> inpainting).
    #
    # NOTE(review): obfuscated dataset residue — every method is named `A`
    # (later defs shadow earlier ones; unittest would discover none of them),
    # and locals go to `UpperCAmelCase_` while the code reads `pipe`, `mask_prompt`,
    # `generator`, `expected_image`, … which would be NameErrors. Kept
    # byte-identical; only comments were added.

    # presumably tearDown: free GPU memory between tests
    def A ( self : Optional[Any] ) -> Optional[int]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # presumably setUpClass: download and cache the input image once
    @classmethod
    def A ( cls : Dict ) -> List[Any]:
        UpperCAmelCase_ : Optional[int] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
        UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
        UpperCAmelCase_ : Any = raw_image

    # presumably the full DiffEdit round-trip with DDIM schedulers
    def A ( self : List[Any] ) -> List[str]:
        UpperCAmelCase_ : int = torch.manual_seed(0 )
        UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
        UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
        UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
        UpperCAmelCase_ : Tuple = '''a bowl of pears'''
        UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
            image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
        UpperCAmelCase_ : List[str] = pipe.invert(
            prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
        UpperCAmelCase_ : Any = pipe(
            prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
        UpperCAmelCase_ : str = (
            np.array(
                load_image(
                    '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                    '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
            / 2_55
        )
        assert np.abs((expected_image - image).max() ) < 5e-1

    # presumably the same round-trip with DPMSolver multistep schedulers
    def A ( self : Tuple ) -> List[str]:
        UpperCAmelCase_ : Dict = torch.manual_seed(0 )
        UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
        UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
        UpperCAmelCase_ : Dict = '''a bowl of pears'''
        UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
        UpperCAmelCase_ : List[Any] = pipe.invert(
            prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
        UpperCAmelCase_ : Dict = pipe(
            prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
        UpperCAmelCase_ : Tuple = (
            np.array(
                load_image(
                    '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                    '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
            / 2_55
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 304 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the obfuscated source assigned all four module constants to the
# same name `__UpperCAmelCase` (each shadowing the previous), while the
# tokenizer class below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES; restored those names. `logger`
# follows the file's `logging.get_logger(__name__)` convention — nothing in
# view reads it, confirm the name against other callers.
logger = logging.get_logger(__name__)

# File names the PhoBERT tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

# Download locations for the published vinai/phobert checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum model input sizes (positions) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a string or tuple of symbols).

    NOTE(review): renamed from the obfuscated ``_a`` — the tokenizer's ``bpe``
    method below calls ``get_pairs(...)`` — and restored the ``pairs``/
    ``prev_char`` locals the body referenced without binding.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)  # kept from the original structure (already a set)
    return pairs
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : List[Any]="</s>" , snake_case : Any="<s>" , snake_case : int="<unk>" , snake_case : List[Any]="<pad>" , snake_case : List[str]="<mask>" , **snake_case : List[Any] , ) -> Any:
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , **snake_case , )
__UpperCAmelCase : List[Any] = vocab_file
__UpperCAmelCase : str = merges_file
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : str = 2
__UpperCAmelCase : int = 3
self.add_from_file(snake_case )
__UpperCAmelCase : int = {v: k for k, v in self.encoder.items()}
with open(snake_case , encoding='''utf-8''' ) as merges_handle:
__UpperCAmelCase : Optional[Any] = merges_handle.read().split('''\n''' )[:-1]
__UpperCAmelCase : List[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
__UpperCAmelCase : Any = dict(zip(snake_case , range(len(snake_case ) ) ) )
__UpperCAmelCase : List[str] = {}
def lowerCamelCase__ ( self : List[str] , snake_case : Optional[int] , snake_case : Any = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
__UpperCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Optional[int] , snake_case : List[Any] , snake_case : Tuple = None , snake_case : Union[str, Any] = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def lowerCamelCase__ ( self : List[str] , snake_case : int , snake_case : Any = None ) -> List[int]:
__UpperCAmelCase : List[str] = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
return len(self.encoder )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : int , snake_case : Dict ) -> List[Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : str = tuple(snake_case )
__UpperCAmelCase : List[str] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__UpperCAmelCase : List[Any] = get_pairs(snake_case )
if not pairs:
return token
while True:
__UpperCAmelCase : Union[str, Any] = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase : Any = bigram
__UpperCAmelCase : int = []
__UpperCAmelCase : str = 0
while i < len(snake_case ):
try:
__UpperCAmelCase : Tuple = word.index(snake_case , snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : int = tuple(snake_case )
__UpperCAmelCase : Optional[Any] = new_word
if len(snake_case ) == 1:
break
else:
__UpperCAmelCase : Dict = get_pairs(snake_case )
__UpperCAmelCase : Optional[Any] = '''@@ '''.join(snake_case )
__UpperCAmelCase : str = word[:-4]
__UpperCAmelCase : Any = word
return word
def lowerCamelCase__ ( self : List[str] , snake_case : Any ) -> Dict:
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Union[str, Any] = re.findall(r'''\S+\n?''' , snake_case )
for token in words:
split_tokens.extend(list(self.bpe(snake_case ).split(''' ''' ) ) )
return split_tokens
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : str ) -> Optional[Any]:
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Union[str, Any] ) -> List[str]:
return self.decoder.get(snake_case , self.unk_token )
def lowerCamelCase__ ( self : int , snake_case : str ) -> str:
__UpperCAmelCase : Optional[Any] = ''' '''.join(snake_case ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowerCamelCase__ ( self : Any , snake_case : Dict , snake_case : List[Any] = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : Dict = os.path.join(
snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase : Optional[Any] = os.path.join(
snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case ):
copyfile(self.merges_file , snake_case )
return out_vocab_file, out_merge_file
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Tuple ) -> Any:
if isinstance(snake_case , snake_case ):
try:
with open(snake_case , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(snake_case )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
__UpperCAmelCase : int = f.readlines()
for lineTmp in lines:
__UpperCAmelCase : Optional[int] = lineTmp.strip()
__UpperCAmelCase : str = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
__UpperCAmelCase : str = line[:idx]
__UpperCAmelCase : str = len(self.encoder ) | 352 |
'''simple docstring'''
import os
import sys
import unittest
__UpperCAmelCase :Optional[int] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase :Dict = os.path.join(git_repo_path, "src", "diffusers")
class a(unittest.TestCase):
    """Unit tests for the `check_dummies` utility (backend detection and dummy-file generation)."""

    def test_find_backend(self):
        # A single backend requirement maps to that backend's name.
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        # Several requirements are joined with "_and_".
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        # NOTE(review): the expected class body was reconstructed from the
        # generator's canonical output format — confirm against check_dummies.
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
'''simple docstring'''
# Memoization cache for _calculate: (days, absent, late) -> number of prize strings.
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the attendance ("prize") strings of length ``days``.

    A string remains eligible while it contains neither three consecutive
    late days nor a second absence.

    Args:
        days: Remaining number of days.
        absent: Number of absences accumulated so far.
        late: Number of consecutive late days ending at the current position.

    Returns:
        The number of prize strings still reachable from this state.
    """
    # A third consecutive late day or a second absence forfeits the prize.
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # Check the cache first: the number of prize strings from a given
    # (days, absent, late) state is independent of how we reached it.
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings of length ``days`` (Project Euler 191)."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
'''simple docstring'''
class _lowerCAmelCase:
    """A menu item with a name, a value and a weight (cost)."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        """Return the item's value."""
        return self.value

    def get_name(self):
        """Return the item's name."""
        return self.name

    def get_weight(self):
        """Return the item's weight (cost)."""
        return self.weight

    def value_weight(self):
        """Return the value-per-weight ratio (the usual greedy sort key)."""
        return self.value / self.weight


# Backward-compatible alias: the menu-builder function refers to ``Things``.
Things = _lowerCAmelCase
def UpperCAmelCase(name, value, weight) -> list:
    """Build a list of menu items from parallel name/value/weight sequences.

    Args:
        name: Sequence of item names.
        value: Sequence of item values (same length as ``name``).
        weight: Sequence of item weights (same length as ``name``).

    Returns:
        List of menu-item objects, one per position.
    """
    menu = []
    for i in range(len(value)):
        menu.append(_lowerCAmelCase(name[i], value[i], weight[i]))
    return menu
def UpperCAmelCase(items, max_cost, key_func) -> tuple:
    """Greedy selection: take items in decreasing ``key_func`` order while they fit.

    Args:
        items: Iterable of items exposing ``get_weight()`` and ``get_value()``.
        max_cost: Maximum total weight that may be selected.
        key_func: Sort key for the greedy order (e.g. value-per-weight ratio).

    Returns:
        Tuple ``(chosen_items, total_value)``.
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        # Take every item that still fits within the remaining budget.
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def UpperCAmelCase ( ) -> Tuple:
    # NOTE(review): empty placeholder — upstream this slot holds a doctest
    # exercising the greedy selection. Because it shares its name with the
    # builder and greedy functions above, this definition also shadows them
    # at import time; confirm whether distinct names were intended.
    """simple docstring"""

if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
__lowercase : str = TypeVar('_T')
class __UpperCamelCase(Generic[_T]):
    """A FIFO queue implemented with two LIFO stacks (amortised O(1) per operation)."""

    def __init__(self, iterable=None):
        """Initialise the queue, optionally pre-filling it from ``iterable``."""
        # New items are pushed onto _stack1; _stack2 holds items in reversed
        # order so its top is always the queue's front.
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self):
        return len(self._stack1) + len(self._stack2)

    def __repr__(self):
        # Present the contents front-to-back regardless of which stack they sit in.
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item) -> None:
        """Append ``item`` to the back of the queue."""
        self._stack1.append(item)

    def get(self) -> _T:
        """Remove and return the front item.

        Raises:
            IndexError: If the queue is empty.
        """
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        # Refill the output stack only when it is exhausted; reversing the
        # pending inputs makes them pop in FIFO order.
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError('Queue is empty')

        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 294 |
'''simple docstring'''
# Lazy-import bootstrap for the RoCBert model, following the standard
# `transformers`-style __init__ pattern: declare an import structure, then
# expose submodules through a `_LazyModule` resolved on first access.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Map of submodule name -> public names it provides.
# NOTE(review): the annotation references `Union`, which is never imported here
# (only TYPE_CHECKING is) — module-level annotations are evaluated, so this
# raises NameError. Also the dict is bound to `__lowercase` while the
# `_LazyModule(...)` call at the bottom passes `_import_structure`, which is
# undefined in this file; presumably both should share one name — confirm
# against the original module.
__lowercase : Union[str, Any] = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

# Optional fast-tokenizer support.
# NOTE(review): both branches are no-ops; upstream the `else` branch registers
# the fast tokenizer in the import structure — confirm whether dropping it was
# deliberate.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

# Torch-backed modeling objects, only declared when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase : List[str] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]

# Static imports for type checkers only; at runtime the module is lazy.
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    # NOTE(review): raising OptionalDependencyNotAvailable in the `else`
    # branch (i.e. exactly when tokenizers IS available) looks inverted —
    # upstream this branch imports the fast tokenizer instead. Confirm.
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    # NOTE(review): the lazy module is assigned to a throwaway variable; the
    # canonical pattern installs it via `sys.modules[__name__] = _LazyModule(...)`
    # (otherwise the `import sys` above is unused and laziness never engages).
    __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 1 |
from __future__ import annotations
from collections import deque
class _lowerCamelCase:
    """Aho-Corasick automaton for simultaneous multi-keyword substring search."""

    def __init__(self, keywords) -> None:
        """Build the keyword trie and wire up the failure transitions."""
        # Each node is a dict: value (incoming edge char), next_states
        # (child indices), fail_state (failure link), output (keywords
        # recognised at this node).
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char) -> int | None:
        """Return the child of ``current_state`` reached via ``char``, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword) -> None:
        """Insert ``keyword`` into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie computing failure links and merged outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 nodes always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                # Follow failure links until a state with a matching edge
                # is found or we reach the root.
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # Anything matched at the fail state is also matched here.
                self.adlist[child]["output"] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string) -> dict[str, list[int]]:
        """Return ``{keyword: [start indices]}`` for all occurrences in ``string``."""
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """Builds tiny SqueezeBert configs/inputs and checks each task head's output shape."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Generate random ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE: attention_probs_dropout_prob intentionally feeds attention_dropout
        # and hidden_dropout_prob feeds attention_probs_dropout_prob, mirroring
        # the original wiring of this tester.
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat every sequence once per choice: (batch, seq) -> (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-harness wiring for SqueezeBert: config checks and per-head shape tests."""

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        """Smoke-test a pretrained MNLI checkpoint against known logits."""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 21 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """Fast CPU tests for the Karras VE unconditional image pipeline."""

    @property
    def dummy_uncond_unet(self):
        """A tiny, deterministically-seeded UNet used as the pipeline backbone."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(3_2, 6_4),
            layers_per_block=2,
            sample_size=3_2,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        # Re-seed so the tuple-return path produces the identical sample.
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        """Run the full pipeline against a pretrained checkpoint and check a pixel slice."""
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2_0, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_5_6, 2_5_6, 3)
        expected_slice = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 243 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase__ ( unittest.TestCase):
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.02 , lowercase=4 , ) -> str:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_attention_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_choices
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_attention_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self ) -> Tuple:
    """Repack prepare_config_and_inputs() output as (config, inputs_dict).

    NOTE(review): same extraction artifact as above -- ``config_and_inputs``,
    ``input_ids`` etc. are read but only ``__UpperCamelCase`` was assigned.
    """
    __UpperCamelCase = self.prepare_config_and_inputs()
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
    __UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
    return config, inputs_dict
def __lowerCamelCase ( self ) -> Any:
    """Build decoder-style inputs: adds encoder hidden states and an encoder
    attention mask on top of the regular config/inputs.

    NOTE(review): same extraction artifact -- the names returned below are
    undefined because every assignment went to ``__UpperCamelCase``.
    """
    __UpperCamelCase = self.prepare_config_and_inputs()
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
    __UpperCamelCase = True
    # random encoder outputs of shape (batch, seq, hidden)
    __UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
    # binary mask (values in {0, 1}) over the encoder sequence
    __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
    return (
        config,
        input_ids,
        attention_mask,
        encoder_hidden_states,
        encoder_attention_mask,
    )
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
    """Flax BERT model test shell (fast setUp + one slow pretrained smoke test).

    NOTE(review): extraction artifact -- both class attributes were renamed to
    ``__SCREAMING_SNAKE_CASE``, so the tuple assignment silently overwrites
    the preceding boolean flag; both test methods likewise share one name.
    """

    __SCREAMING_SNAKE_CASE = True
    # Fix: FlaxBertForQuestionAnswering appeared twice in this tuple; the
    # duplicate entry only made the shared mixin run the same tests twice.
    __SCREAMING_SNAKE_CASE = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def __lowerCamelCase ( self ) -> Dict:
        """Attach the model tester consumed by the shared mixin tests."""
        __UpperCamelCase = FlaxBertModelTester(self )

    @slow
    def __lowerCamelCase ( self ) -> Any:
        """Smoke-test that the pretrained base checkpoint loads and runs."""
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        __UpperCamelCase = FlaxBertModel.from_pretrained("""bert-base-cased""" )
        __UpperCamelCase = model(np.ones((1, 1) ) )
        self.assertIsNotNone(lowercase )
| 243 | 1 |
'''simple docstring'''
from __future__ import annotations
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
    """Entry point of a subset-sum search; delegates to the recursive helper.

    NOTE(review): extraction artifact -- both parameters share the name
    ``lowerCamelCase_`` (a SyntaxError), every argument below became the
    undefined name ``__snake_case``, and ``create_state_space_tree`` does not
    exist under that name in this file (the helper was renamed to ``__A``).
    """
    SCREAMING_SNAKE_CASE : list[list[int]] = []
    SCREAMING_SNAKE_CASE : list[int] = []
    SCREAMING_SNAKE_CASE : Any = 0
    SCREAMING_SNAKE_CASE : Dict = sum(__snake_case )
    create_state_space_tree(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
    return result
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
    """Recursive state-space search: prune when the running sum overshoots or
    the remaining numbers cannot reach the target; record exact hits.

    NOTE(review): extraction artifact -- the six parameters share one name
    (SyntaxError) and the ``__snake_case`` arguments are undefined; the reads
    of ``max_sum``, ``remaining_nums_sum``, ``result``, ``path``, ``nums``
    below reveal the intended parameter list.
    """
    if sum(__snake_case ) > max_sum or (remaining_nums_sum + sum(__snake_case )) < max_sum:
        return
    if sum(__snake_case ) == max_sum:
        result.append(__snake_case )
        return
    for index in range(__snake_case , len(__snake_case ) ):
        create_state_space_tree(
            __snake_case , __snake_case , index + 1 , [*path, nums[index]] , __snake_case , remaining_nums_sum - nums[index] , )
# Demo driver for the subset-sum search above.
# NOTE(review): extraction artifact -- the three constants all share the name
# ``__UpperCAmelCase`` and the call reads the undefined names
# ``generate_sum_of_subsets_soln``/``nums``/``max_sum``/``result``.
__UpperCAmelCase = [3, 34, 4, 12, 5, 2]
__UpperCAmelCase = 9
__UpperCAmelCase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 323 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
    '''Fast (tiny-component) tests for StableDiffusionInpaintPipeline.

    NOTE(review): extraction artifact -- the three mixin bases were all
    renamed to ``lowercase_`` (duplicate bases raise TypeError at class
    creation), the five class attributes all share the name ``__snake_case``
    (each overwriting the last), and several method parameters below share a
    single name (a SyntaxError). Locals are assigned to ``A__`` while later
    lines read the original names (``components``, ``inputs``, ...).
    '''
    __snake_case = StableDiffusionInpaintPipeline
    __snake_case = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    __snake_case = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __snake_case = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __snake_case = frozenset([] )

    def lowercase__ ( self : Tuple ) -> Optional[int]:
        '''Build the tiny UNet/scheduler/VAE/CLIP components dict for the pipeline.'''
        torch.manual_seed(0 )
        # 9 input channels: latent + masked-image latent + mask for inpainting
        A__ : Optional[Any] =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , )
        A__ : Dict =PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
        torch.manual_seed(0 )
        A__ : int =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        A__ : str =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
        A__ : Optional[int] =CLIPTextModel(lowerCAmelCase_ )
        A__ : Dict =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : str ={
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def lowercase__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any]=0 ) -> List[str]:
        '''Build deterministic dummy call kwargs (image, mask, prompt, generator).'''
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        A__ : List[str] =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
        A__ : List[str] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A__ : List[str] =Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
        A__ : int =Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
        # mps does not support device-bound generators, so fall back to the global one
        if str(lowerCAmelCase_ ).startswith("""mps""" ):
            A__ : str =torch.manual_seed(lowerCAmelCase_ )
        else:
            A__ : Tuple =torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
        A__ : Optional[Any] ={
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowercase__ ( self : Any ) -> Tuple:
        '''Run the tiny pipeline end-to-end on CPU and pin a 3x3 output slice.'''
        A__ : str ="""cpu"""  # ensure determinism for the device-dependent torch.Generator
        A__ : Tuple =self.get_dummy_components()
        A__ : str =StableDiffusionInpaintPipeline(**lowerCAmelCase_ )
        A__ : Any =sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        A__ : Optional[Any] =self.get_dummy_inputs(lowerCAmelCase_ )
        A__ : Dict =sd_pipe(**lowerCAmelCase_ ).images
        A__ : Union[str, Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A__ : Optional[Any] =np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase__ ( self : Optional[Any] ) -> Tuple:
        '''Delegate to the shared batch-consistency test with a looser tolerance.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    '''Slow, GPU-only integration tests for the pretrained SD2 inpainting model.

    NOTE(review): extraction artifact -- all four methods share one name and
    locals are assigned to ``A__`` while later lines read the intended names
    (``init_image``, ``pipe``, ``output`` ...); undefined as written.
    '''

    def lowercase__ ( self : Union[str, Any] ) -> Any:
        '''Free GPU memory between tests.'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__ ( self : Any ) -> Tuple:
        '''Full-precision inpainting run compared against a stored reference image.'''
        A__ : int =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        A__ : int =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        A__ : Union[str, Any] =load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )
        A__ : Optional[Any] ="""stabilityai/stable-diffusion-2-inpainting"""
        A__ : int =StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing()
        A__ : Dict ="""Face of a yellow cat, high resolution, sitting on a park bench"""
        A__ : str =torch.manual_seed(0 )
        A__ : Dict =pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
        A__ : Tuple =output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def lowercase__ ( self : str ) -> int:
        '''Same run in fp16, with a correspondingly looser tolerance.'''
        A__ : int =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        A__ : List[Any] =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        A__ : List[Any] =load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
        A__ : int ="""stabilityai/stable-diffusion-2-inpainting"""
        A__ : List[Any] =StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCAmelCase_ , )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing()
        A__ : Union[str, Any] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
        A__ : Union[str, Any] =torch.manual_seed(0 )
        A__ : Dict =pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
        A__ : str =output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def lowercase__ ( self : Optional[int] ) -> Tuple:
        '''Check that attention slicing + sequential CPU offload keeps peak VRAM low.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        A__ : Union[str, Any] =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        A__ : Optional[Any] =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        A__ : List[str] ="""stabilityai/stable-diffusion-2-inpainting"""
        A__ : Any =PNDMScheduler.from_pretrained(lowerCAmelCase_ , subfolder="""scheduler""" )
        A__ : Optional[int] =StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , torch_dtype=torch.floataa , )
        pipe.to(lowerCAmelCase_ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        A__ : Dict ="""Face of a yellow cat, high resolution, sitting on a park bench"""
        A__ : Any =torch.manual_seed(0 )
        A__ : Tuple =pipe(
            prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
        A__ : Dict =torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 134 | 0 |
def __lowerCAmelCase ( a__ ) -> tuple[int, int]:
    """Convert a number (int, float, or numeric string) to a reduced fraction.

    Returns ``(numerator, denominator)`` in lowest terms; whole numbers yield
    a denominator of 1. Raises ValueError for non-numeric input.

    Fixes: the body referenced ``_a`` instead of the parameter ``a__``
    (NameError), the ``Any`` return annotation was never imported, and the
    final reduction used float ``/`` where integer ``//`` is exact.

    NOTE(review): like the original, this relies on ``str(float)`` containing
    a '.', so scientific-notation values (e.g. 1e-7) are not supported.
    """
    try:
        decimal = float(a__ )
    except ValueError as exc:
        raise ValueError('''Please enter a valid number''' ) from exc
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    # scale by a power of ten large enough to make the value integral
    number_of_frac_digits = len(str(decimal ).split('''.''' )[1] )
    numerator = int(decimal * (10**number_of_frac_digits) )
    denominator = 10**number_of_frac_digits
    # Euclid's algorithm: after the loop, ``divisor`` holds gcd(num, den)
    divisor, remainder = numerator, denominator
    while remainder:
        divisor, remainder = remainder, divisor % remainder
    # integer division keeps the result exact (the original used float '/')
    return numerator // divisor, denominator // divisor
if __name__ == "__main__":
    # Demo driver. Fix: the original referenced the undefined name
    # ``decimal_to_fraction``; the converter is defined above as
    # ``__lowerCAmelCase``. The last call raises ValueError ('78td' is not
    # numeric) and therefore ends the demo, as in the original.
    print(F"{__lowerCAmelCase(2) = }")
    print(F"{__lowerCAmelCase(89.0) = }")
    print(F"{__lowerCAmelCase('67') = }")
    print(F"{__lowerCAmelCase('45.0') = }")
    print(F"{__lowerCAmelCase(1.5) = }")
    print(F"{__lowerCAmelCase('6.25') = }")
    print(F"{__lowerCAmelCase('78td') = }")
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
# Extract a subset of layers from a full RoBERTa / GPT-2 checkpoint to seed a
# distilled student model, then save the compressed state dict.
# NOTE(review): extraction artifact -- every assignment target was renamed to
# ``A``, yet later lines read the original names (``model``, ``prefix``,
# ``state_dict``, ``compressed_sd``, ``std_idx``), which are undefined as
# written; restore distinct names before running.
if __name__ == "__main__":
    A : Union[str, Any] = argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
    parser.add_argument('--model_name', default='roberta-large', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    A : str = parser.parse_args()
    if args.model_type == "roberta":
        A : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
        A : Any = 'roberta'
    elif args.model_type == "gpt2":
        A : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
        A : List[str] = 'transformer'
    A : Dict = model.state_dict()
    A : Any = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            A : Union[str, Any] = state_dict[F"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            A : Any = F"{prefix}.embeddings.{w}.weight"
            A : Union[str, Any] = state_dict[param_name]
        for w in ["weight", "bias"]:
            A : List[Any] = F"{prefix}.embeddings.LayerNorm.{w}"
            A : List[str] = state_dict[param_name]
    # Transformer Blocks #
    # Copy every other teacher layer (0, 2, 4, 7, 9, 11) into the student.
    A : Optional[int] = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    A : Any = state_dict[
                        F"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            A : List[str] = state_dict[F"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    A : List[Any] = state_dict[
                        F"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1
    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            A : Optional[int] = state_dict[F"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                A : List[Any] = state_dict[F"lm_head.dense.{w}"]
                A : List[str] = state_dict[F"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            A : List[str] = state_dict[F"{prefix}.ln_f.{w}"]
        A : Dict = state_dict['lm_head.weight']
    print(F"N layers selected for distillation: {std_idx}")
    print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class snake_case_ ( __A ,unittest.TestCase ):
    '''Unit tests for the PriorTransformer model (shapes, signature, pretrained load).

    NOTE(review): extraction artifact -- the two class attributes share the
    name ``__A`` (the second overwrites the first) and all methods share one
    name; locals are assigned to ``lowercase__`` while later lines read the
    intended names.
    '''
    __A : Any = PriorTransformer
    __A : Union[str, Any] = "hidden_states"

    @property
    def __UpperCamelCase ( self : Dict ) -> str:
        '''Random (unseeded) dummy inputs for the model under test.'''
        lowercase__ : Optional[Any] = 4
        lowercase__ : Any = 8
        lowercase__ : str = 7
        lowercase__ : Dict = floats_tensor((batch_size, embedding_dim) ).to(lowercase_ )
        lowercase__ : List[str] = floats_tensor((batch_size, embedding_dim) ).to(lowercase_ )
        lowercase__ : Dict = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowercase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def __UpperCamelCase ( self : Dict , lowercase_ : Optional[int]=0 ) -> int:
        '''Seeded dummy inputs for reproducible output checks.'''
        torch.manual_seed(lowercase_ )
        lowercase__ : Tuple = 4
        lowercase__ : Optional[int] = 8
        lowercase__ : int = 7
        lowercase__ : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(lowercase_ )
        lowercase__ : Any = torch.randn((batch_size, embedding_dim) ).to(lowercase_ )
        lowercase__ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowercase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
        '''Expected input shape (batch, embedding_dim).'''
        return (4, 8)

    @property
    def __UpperCamelCase ( self : int ) -> int:
        '''Expected output shape (batch, embedding_dim).'''
        return (4, 8)

    def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
        '''Tiny model init kwargs plus matching dummy inputs.'''
        lowercase__ : Union[str, Any] = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        lowercase__ : Tuple = self.dummy_input
        return init_dict, inputs_dict

    def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
        '''Load the dummy pretrained checkpoint and check nothing is missing.'''
        lowercase__ , lowercase__ : Any = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy" , output_loading_info=lowercase_ )
        self.assertIsNotNone(lowercase_ )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(lowercase_ )
        lowercase__ : Dict = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"

    def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
        '''Check the forward signature starts with (hidden_states, timestep).'''
        lowercase__ , lowercase__ : str = self.prepare_init_args_and_inputs_for_common()
        lowercase__ : Any = self.model_class(**lowercase_ )
        lowercase__ : Tuple = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        lowercase__ : Optional[int] = [*signature.parameters.keys()]
        lowercase__ : Optional[Any] = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2] , lowercase_ )

    def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
        '''Pin a 5-value output slice of the dummy pretrained model.'''
        lowercase__ : str = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
        lowercase__ : Tuple = model.to(lowercase_ )
        if hasattr(lowercase_ , "set_default_attn_processor" ):
            model.set_default_attn_processor()
        lowercase__ : Tuple = self.get_dummy_seed_input()
        with torch.no_grad():
            lowercase__ : Union[str, Any] = model(**lowercase_ )[0]
        lowercase__ : int = output[0, :5].flatten().cpu()
        print(lowercase_ )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        lowercase__ : Dict = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-2 ) )
@slow
class snake_case_ ( unittest.TestCase ):
    '''Slow integration tests against the Kandinsky 2.1 prior checkpoint.

    NOTE(review): extraction artifact -- parameters of the first method all
    share the name ``lowercase_`` (a SyntaxError) and locals are assigned to
    ``lowercase__`` while later lines read the intended names.
    '''

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Any=1 , lowercase_ : Any=7_68 , lowercase_ : Any=77 , lowercase_ : Tuple=0 ) -> int:
        '''Seeded random inputs sized like the real prior (768-dim, 77 embeddings).'''
        torch.manual_seed(lowercase_ )
        lowercase__ : int = batch_size
        lowercase__ : Any = embedding_dim
        lowercase__ : Dict = num_embeddings
        lowercase__ : str = torch.randn((batch_size, embedding_dim) ).to(lowercase_ )
        lowercase__ : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(lowercase_ )
        lowercase__ : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowercase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def __UpperCamelCase ( self : Any ) -> Tuple:
        '''Free GPU memory between tests.'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
            [37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
            # fmt: on
        ] )
    def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : Optional[int] ) -> List[Any]:
        '''Pin an 8-value output slice for two different seeds.'''
        lowercase__ : Tuple = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
        model.to(lowercase_ )
        lowercase__ : Any = self.get_dummy_seed_input(seed=lowercase_ )
        with torch.no_grad():
            lowercase__ : int = model(**lowercase_ )[0]
        assert list(sample.shape ) == [1, 7_68]
        lowercase__ : List[Any] = sample[0, :8].flatten().cpu()
        print(lowercase_ )
        lowercase__ : List[Any] = torch.tensor(lowercase_ )
        assert torch_all_close(lowercase_ , lowercase_ , atol=1E-3 )
| 87 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case_( unittest.TestCase ):
    '''Slow Flax Stable Diffusion 2 integration tests (bf16, sharded across devices).

    NOTE(review): extraction artifact -- locals are assigned to
    ``lowerCAmelCase`` while later lines read the intended names
    (``sd_pipe``, ``prompt``, ``num_samples`` ...); undefined as written.
    '''

    def lowerCamelCase__ ( self : int ):
        '''Release memory between tests.'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def lowerCamelCase__ ( self : Optional[Any] ):
        '''Run SD2 with the default scheduler and pin a 3x3 output slice.'''
        lowerCAmelCase, lowerCAmelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
        lowerCAmelCase : Optional[int] = '''A painting of a squirrel eating a burger'''
        # one sample per accelerator device, pmapped via replicate/shard
        lowerCAmelCase : List[str] = jax.device_count()
        lowerCAmelCase : Optional[int] = num_samples * [prompt]
        lowerCAmelCase : Any = sd_pipe.prepare_inputs(UpperCamelCase_ )
        lowerCAmelCase : Optional[int] = replicate(UpperCamelCase_ )
        lowerCAmelCase : Union[str, Any] = shard(UpperCamelCase_ )
        lowerCAmelCase : Optional[int] = jax.random.PRNGKey(0 )
        lowerCAmelCase : Optional[Any] = jax.random.split(UpperCamelCase_ , jax.device_count() )
        lowerCAmelCase : str = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=2_5 , jit=UpperCamelCase_ )[0]
        assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
        lowerCAmelCase : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowerCAmelCase : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        lowerCAmelCase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCAmelCase : List[str] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    def lowerCamelCase__ ( self : Union[str, Any] ):
        '''Same run with the DPM-Solver multistep scheduler swapped in.'''
        lowerCAmelCase : Union[str, Any] = '''stabilityai/stable-diffusion-2'''
        lowerCAmelCase, lowerCAmelCase : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' )
        lowerCAmelCase, lowerCAmelCase : int = FlaxStableDiffusionPipeline.from_pretrained(
            UpperCamelCase_ , scheduler=UpperCamelCase_ , revision='''bf16''' , dtype=jnp.bfloataa , )
        lowerCAmelCase : List[Any] = scheduler_params
        lowerCAmelCase : List[Any] = '''A painting of a squirrel eating a burger'''
        lowerCAmelCase : Any = jax.device_count()
        lowerCAmelCase : int = num_samples * [prompt]
        lowerCAmelCase : int = sd_pipe.prepare_inputs(UpperCamelCase_ )
        lowerCAmelCase : Dict = replicate(UpperCamelCase_ )
        lowerCAmelCase : Tuple = shard(UpperCamelCase_ )
        lowerCAmelCase : int = jax.random.PRNGKey(0 )
        lowerCAmelCase : Optional[int] = jax.random.split(UpperCamelCase_ , jax.device_count() )
        lowerCAmelCase : Tuple = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=2_5 , jit=UpperCamelCase_ )[0]
        assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
        lowerCAmelCase : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowerCAmelCase : str = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        lowerCAmelCase : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowerCAmelCase : Tuple = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 60 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DeBERTa subpackage: names are registered per
# submodule and only resolved on attribute access via _LazyModule; optional
# backends (tokenizers / torch / tf) are skipped when unavailable.
# NOTE(review): extraction artifact -- every assignment target was renamed to
# ``lowerCAmelCase__`` (each list assignment overwrites the previous value),
# yet the final _LazyModule call reads the undefined name
# ``_import_structure``; restore distinct names before use.
lowerCAmelCase__ = {
    '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
    '''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['''DebertaTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DebertaForMaskedLM''',
        '''DebertaForQuestionAnswering''',
        '''DebertaForSequenceClassification''',
        '''DebertaForTokenClassification''',
        '''DebertaModel''',
        '''DebertaPreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDebertaForMaskedLM''',
        '''TFDebertaForQuestionAnswering''',
        '''TFDebertaForSequenceClassification''',
        '''TFDebertaForTokenClassification''',
        '''TFDebertaModel''',
        '''TFDebertaPreTrainedModel''',
    ]
# Under static type checking, import everything eagerly instead of lazily.
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 121 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the NLLB tokenizers (slow + fast variants), each
# registered only when its optional backend is installed.
# NOTE(review): same extraction artifact as elsewhere in this dump -- the
# assignments all target ``lowerCAmelCase__`` while _LazyModule reads the
# undefined name ``_import_structure``.
lowerCAmelCase__ = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['''NllbTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ['''NllbTokenizerFast''']
# Under static type checking, import eagerly instead of lazily.
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 121 | 1 |
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
    """Return True iff the integer ``snake_case_`` is a base-10 palindrome.

    Negative numbers are never palindromes; 0 is. The reversal is built
    arithmetically (no string conversion).

    Fixes: the original body referenced ``num``/``rev_num``/``num_copy``,
    which were undefined after the parameter was renamed to ``snake_case_``
    (NameError on every call).
    """
    if snake_case_ < 0:
        return False
    remaining = snake_case_
    reversed_num = 0
    while remaining > 0:
        # peel off the lowest digit and push it onto the reversal
        reversed_num = reversed_num * 10 + remaining % 10
        remaining //= 10
    return snake_case_ == reversed_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 133 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark configuration: example count and the JSON results path derived
# from this script's own filename.
# NOTE(review): extraction artifact -- all three targets share the name
# ``snake_case__``, so the reads of RESULTS_BASEPATH / RESULTS_FILENAME on the
# last line (and SPEED_TEST_N_EXAMPLES below) are undefined as written.
snake_case__ : Union[str, Any] = 500000
snake_case__ , snake_case__ : Optional[Any] = os.path.split(__file__)
snake_case__ : List[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _a ( lowerCamelCase: datasets.Dataset , **lowerCamelCase: Optional[int] ) -> str:
    '''Time a single ``dataset.map(**kwargs)`` call (timing via @get_duration).

    NOTE(review): extraction artifact -- both parameters share the name
    ``lowerCamelCase`` (a SyntaxError) and ``dataset`` in the body is
    undefined under that renaming.
    '''
    __A = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase: datasets.Dataset , **lowerCamelCase: Optional[int] ) -> str:
    '''Time a single ``dataset.filter(**kwargs)`` call (timing via @get_duration).

    NOTE(review): same duplicate-parameter / undefined-name artifact as the
    map wrapper above; also shadows it, since both were renamed to ``_a``.
    '''
    __A = dataset.filter(**lowerCamelCase )
def _a ( ) -> List[Any]:
    '''Benchmark dataset.map/filter across output formats and dump timings as JSON.

    NOTE(review): extraction artifact -- locals are assigned to ``__A`` while
    later lines read the intended names (``dataset``, ``tokenizer``,
    ``times`` ...), and the two timed helpers above were both renamed to
    ``_a``, so the bare ``map``/``filter`` calls below resolve to the
    builtins instead.
    '''
    __A = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        __A = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        __A = generate_example_dataset(
            os.path.join(lowerCamelCase , '''dataset.arrow''' ) , lowerCamelCase , num_examples=lowerCamelCase )
        __A = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase )
        def tokenize(lowerCamelCase: List[str] ):
            return tokenizer(examples['''text'''] )
        __A = map(lowerCamelCase )
        __A = map(lowerCamelCase , batched=lowerCamelCase )
        __A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
        # time a no-op batched map under each output format
        with dataset.formatted_as(type='''numpy''' ):
            __A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
        with dataset.formatted_as(type='''pandas''' ):
            __A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            __A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            __A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
        __A = map(lowerCamelCase , function=lowerCamelCase , batched=lowerCamelCase )
        __A = filter(lowerCamelCase )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(lowerCamelCase , '''wb''' ) as f:
            f.write(json.dumps(lowerCamelCase ).encode('''utf-8''' ) )


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 117 | 0 |
def A ( lowercase ) -> bool:
    """Return True iff the card number string starts with one of the accepted
    issuer prefixes ('34', '35', '37', '4', '5', '6').

    Fixes: the body referenced ``credit_card_number``, which was undefined
    after the parameter was renamed to ``lowercase`` (NameError on every call).
    """
    # str.startswith accepts a tuple of prefixes -- one call covers all cases
    return lowercase.startswith(('34', '35', '37', '4', '5', '6') )
def A ( lowercase ) -> bool:
    """Return True iff ``lowercase`` (a string of digits) passes the Luhn check.

    Every second digit from the right (excluding the rightmost check digit)
    is doubled; doubles above 9 are reduced to their digit sum
    (``d % 10 + 1``). The number is valid when the total is a multiple of 10.

    Fixes: the body referenced ``credit_card_number``, undefined after the
    parameter rename (NameError); also drops the original's pointless
    rebuild of the number string, which never affected the result.
    """
    total = 0
    # doubled positions: second-from-right, fourth-from-right, ...
    for i in range(len(lowercase ) - 2 , -1 , -2 ):
        digit = int(lowercase[i] ) * 2
        # two-digit doubles (10..18) collapse to their digit sum
        if digit > 9:
            digit = digit % 10 + 1
        total += digit
    # untouched positions, including the rightmost check digit
    for i in range(len(lowercase ) - 1 , -1 , -2 ):
        total += int(lowercase[i] )
    return total % 10 == 0
def A ( lowercase ) -> bool:
    '''Validate a credit-card number string: digits only, length 13-16, issuer
    prefix, then Luhn checksum; prints a diagnostic and returns the verdict.

    NOTE(review): extraction artifact -- ``credit_card_number``,
    ``validate_initial_digits`` and ``luhn_validation`` are not defined under
    these names in this file (the two helpers above were both renamed to
    ``A``), and the demo below calls the likewise-undefined
    ``validate_credit_card_number``. Not runnable without restoring names.
    '''
    UpperCamelCase = f'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(f'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(lowercase ) <= 16:
        print(f'''{error_message} of its length.''' )
        return False
    if not validate_initial_digits(lowercase ):
        print(f'''{error_message} of its first two digits.''' )
        return False
    if not luhn_validation(lowercase ):
        print(f'''{error_message} it fails the Luhn check.''' )
        return False
    print(f'''{credit_card_number} is a valid credit card number.''' )
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 350 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=0.6 , A_=None , ) -> str:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = mask_ratio
UpperCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = TFViTMAEModel(config=A_ )
UpperCamelCase = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = TFViTMAEForPreTraining(A_ )
UpperCamelCase = model(A_ , training=A_ )
# expected sequence length = num_patches
UpperCamelCase = (self.image_size // self.patch_size) ** 2
UpperCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = TFViTMAEForPreTraining(A_ )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(A_ , training=A_ )
UpperCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Union[str, Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowercase : Optional[int] = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__lowercase : Optional[Any] = False
__lowercase : Optional[Any] = False
__lowercase : Dict = False
__lowercase : List[str] = False
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = TFViTMAEModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer ) )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model(A_ , noise=A_ )
UpperCamelCase = copy.deepcopy(self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = model(**A_ , noise=A_ )
UpperCamelCase = outputs_dict[0].numpy()
UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(A_ ):
UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(A_ ):
UpperCamelCase = v.numpy()
else:
UpperCamelCase = np.array(A_ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = prepare_numpy_arrays(A_ )
UpperCamelCase = model(A_ , noise=A_ )
UpperCamelCase = model(**A_ , noise=A_ )
self.assert_outputs_same(A_ , A_ )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> List[Any]:
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.constant(A_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase = tf_noise
super().check_pt_tf_models(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(A_ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(A_ , A_ ),)
if isinstance(A_ , A_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(A_ , '_keras_serializable' , A_ )
}
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.convert_to_tensor(A_ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCamelCase = main_layer_class(A_ )
UpperCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCamelCase = tf.keras.Model(A_ , outputs=main_layer(A_ ) )
UpperCamelCase = model(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = os.path.join(A_ , 'keras_model.h5' )
model.save(A_ )
UpperCamelCase = tf.keras.models.load_model(
A_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(A_ , tf.keras.Model )
UpperCamelCase = model(A_ )
self.assert_outputs_same(A_ , A_ )
@slow
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model(A_ , noise=A_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase = outputs.last_hidden_state.numpy()
UpperCamelCase = 0
else:
UpperCamelCase = outputs.logits.numpy()
UpperCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ , saved_model=A_ )
UpperCamelCase = model_class.from_pretrained(A_ )
UpperCamelCase = model(A_ , noise=A_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase = after_outputs['last_hidden_state'].numpy()
UpperCamelCase = 0
else:
UpperCamelCase = after_outputs['logits'].numpy()
UpperCamelCase = 0
UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1e-5 )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = self._prepare_for_class(A_ , A_ )
UpperCamelCase = model(A_ , noise=A_ )
UpperCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(A_ )
UpperCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCamelCase = model_class.from_config(model.config )
UpperCamelCase = new_model(A_ ) # Build model
new_model.set_weights(model.get_weights() )
UpperCamelCase = new_model(A_ , noise=A_ )
self.assert_outputs_same(A_ , A_ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(A_ )
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCamelCase = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase = ViTMAEConfig()
UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCamelCase = model(**A_ , noise=A_ )
# verify the logits
UpperCamelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , A_ , atol=1e-4 )
| 110 | 0 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : bytes ) -> bytes:
if len(_UpperCAmelCase ) != 32:
raise ValueError("""Input must be of length 32""" )
_a : Union[str, Any] =B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> bytes:
if i < 0:
raise ValueError("""Input must be non-negative""" )
_a : Dict =format(_UpperCAmelCase ,"""08x""" )[-8:]
_a : Optional[Any] =B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : bytes ) -> bytes:
_a : Tuple =B""""""
for char in message:
bit_string += format(_UpperCAmelCase ,"""08b""" ).encode("""utf-8""" )
_a : Optional[Any] =format(len(_UpperCAmelCase ) ,"""064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCAmelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : bytes ) -> Generator[list[int], None, None]:
if len(_UpperCAmelCase ) % 512 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 ,len(_UpperCAmelCase ) ,512 ):
_a : Optional[Any] =bit_string[pos : pos + 512]
_a : Any =[]
for i in range(0 ,512 ,32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) ,2 ) )
yield block_words
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
_a : Any =format(_UpperCAmelCase ,"""032b""" )
_a : str =""""""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCAmelCase ,2 )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return (a + b) % 2**32
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : bytes ) -> bytes:
_a : List[str] =preprocess(_UpperCAmelCase )
_a : Optional[int] =[int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_a : Union[str, Any] =0x67_452_301
_a : List[Any] =0xEF_CDA_B89
_a : int =0x98_BAD_CFE
_a : List[str] =0x10_325_476
_a : List[Any] =[
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCAmelCase ):
_a : str =aa
_a : str =ba
_a : Optional[int] =ca
_a : List[str] =da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Optional[int] =d ^ (b & (c ^ d))
_a : Dict =i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Optional[Any] =c ^ (d & (b ^ c))
_a : List[Any] =(5 * i + 1) % 16
elif i <= 47:
_a : Union[str, Any] =b ^ c ^ d
_a : Union[str, Any] =(3 * i + 5) % 16
else:
_a : Any =c ^ (b | not_aa(_UpperCAmelCase ))
_a : Optional[Any] =(7 * i) % 16
_a : Tuple =(f + a + added_consts[i] + block_words[g]) % 2**32
_a : List[Any] =d
_a : List[str] =c
_a : int =b
_a : int =sum_aa(_UpperCAmelCase ,left_rotate_aa(_UpperCAmelCase ,shift_amounts[i] ) )
# Add hashed chunk to running total
_a : int =sum_aa(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =sum_aa(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =sum_aa(_UpperCAmelCase ,_UpperCAmelCase )
_a : Any =sum_aa(_UpperCAmelCase ,_UpperCAmelCase )
_a : Any =reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 |
'''simple docstring'''
A__: Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 276 | 1 |
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( SCREAMING_SNAKE_CASE__ ):
_lowercase : List[Any] = '''naver-clova-ix/donut-base-finetuned-docvqa'''
_lowercase : int = (
'''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
_lowercase : List[Any] = '''document_qa'''
_lowercase : List[str] = AutoProcessor
_lowercase : str = VisionEncoderDecoderModel
_lowercase : Optional[Any] = ['''image''', '''text''']
_lowercase : Tuple = ['''text''']
def __init__( self , *a , **a) -> List[str]:
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
super().__init__(*a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[Any]:
SCREAMING_SNAKE_CASE = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
SCREAMING_SNAKE_CASE = task_prompt.replace('{user_input}' , a)
SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer(
a , add_special_tokens=a , return_tensors='pt').input_ids
SCREAMING_SNAKE_CASE = self.pre_processor(a , return_tensors='pt').pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
return self.model.generate(
inputs['pixel_values'].to(self.device) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a , ).sequences
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(a)[0]
SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '')
SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '')
SCREAMING_SNAKE_CASE = re.sub(R'<.*?>' , '' , a , count=1).strip() # remove first task start token
SCREAMING_SNAKE_CASE = self.pre_processor.tokenajson(a)
return sequence["answer"]
| 359 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
a_ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _snake_case ( A__ ):
def __init__( self , *a , a=None , a=None , a=None , **a) -> List[Any]:
super().__init__(*a , **a)
SCREAMING_SNAKE_CASE = eval_examples
SCREAMING_SNAKE_CASE = post_process_function
SCREAMING_SNAKE_CASE = quant_trainer_args
SCREAMING_SNAKE_CASE = 128 # default number of calibration samples
def SCREAMING_SNAKE_CASE__ ( self , a=None) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires an calib_dataset.')
SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE = self._remove_unused_columns(a , description='Calibration')
return DataLoader(
a , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=a , )
def SCREAMING_SNAKE_CASE__ ( self , a=None) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE = self.get_calib_dataloader(a)
SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(a , self.quant_trainer_args , calib=a)
model.eval()
quant_trainer.enable_calibration(a)
logger.info('***** Running calibration *****')
logger.info(f''' Num examples = {self.calib_num}''')
logger.info(f''' Batch size = {calib_dataloader.batch_size}''')
for step, inputs in enumerate(a):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prediction_step(a , a , prediction_loss_only=a)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = model
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a=None , a = "eval") -> str:
SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE = self.compute_metrics
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE = eval_loop(
a , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , )
finally:
SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE = self.post_process_function(a , a , output.predictions)
SCREAMING_SNAKE_CASE = self.compute_metrics(a)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
SCREAMING_SNAKE_CASE = metrics.pop(a)
self.log(a)
else:
SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , a)
return metrics
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=None , a = "test") -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.get_test_dataloader(a)
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE = self.compute_metrics
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE = eval_loop(
a , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a , )
finally:
SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE = self.post_process_function(a , a , output.predictions , 'predict')
SCREAMING_SNAKE_CASE = self.compute_metrics(a)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'''{metric_key_prefix}_'''):
SCREAMING_SNAKE_CASE = metrics.pop(a)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a)
def SCREAMING_SNAKE_CASE__ ( self , a="./") -> List[Any]:
SCREAMING_SNAKE_CASE = self.eval_dataset
SCREAMING_SNAKE_CASE = self.get_eval_dataloader(a)
SCREAMING_SNAKE_CASE = next(iter(a))
# saving device - to make it consistent
SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
SCREAMING_SNAKE_CASE = tuple(v.to(a) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.model.to(a)
model.eval()
model.float()
SCREAMING_SNAKE_CASE = model.module if hasattr(a , 'module') else model
quant_trainer.configure_model(a , self.quant_trainer_args)
SCREAMING_SNAKE_CASE = os.path.join(a , 'model.onnx')
logger.info(f'''exporting model to {output_model_file}''')
SCREAMING_SNAKE_CASE = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
a , a , a , export_params=a , opset_version=13 , do_constant_folding=a , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=a , )
logger.info('onnx export finished')
| 327 | 0 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return abs(__snake_case ) if a == 0 else greatest_common_divisor(b % a , __snake_case )
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
while y: # --> when y=0 then loop will terminate and return x as final GCD.
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = y, x % y
return abs(__snake_case )
def lowercase__ ( ):
'''simple docstring'''
try:
UpperCAmelCase_ : str = input('Enter two integers separated by comma (,): ' ).split(',' )
UpperCAmelCase_ : Union[str, Any] = int(nums[0] )
UpperCAmelCase_ : Optional[Any] = int(nums[1] )
print(
F"greatest_common_divisor({num_a}, {num_a}) = "
F"{greatest_common_divisor(__snake_case , __snake_case )}" )
print(F"By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__snake_case , __snake_case )}" )
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
| 29 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
lowercase__ : int = np.max(_outputs , axis=-1 , keepdims=SCREAMING_SNAKE_CASE_ )
lowercase__ : int = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Optional[int] = """sigmoid"""
__lowerCamelCase : List[Any] = """softmax"""
__lowerCamelCase : Optional[Any] = """none"""
@add_end_docstrings(
__snake_case , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class SCREAMING_SNAKE_CASE__ (__snake_case ):
    """Sequence-classification pipeline: tokenize text, run the model, and turn
    logits into ``{"label": ..., "score": ...}`` dicts.

    Fixes vs. previous revision: the four pipeline hooks were all named
    ``snake_case_`` (so the base class never found them), parameter lists used
    duplicate names (a SyntaxError), the sort lambda referenced an undefined
    ``x``, and ``id2label`` was misspelled ``idalabel``.
    """

    # Class-level defaults read by `_sanitize_parameters` fallbacks.
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 214 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# The three module constants below were all bound to the same name
# (`_lowerCAmelCase`), while the Metric class below references `_CITATION`,
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION` — restore the expected names.

# BibTeX for the TER paper (Snover et al., 2006) and sacrebleu (Post, 2018).
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
'''

# Human-readable description shown by `datasets` for this metric.
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''

# Usage documentation (args / returns / examples) surfaced by `datasets`.
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
    \'num_edits\' (int): The cumulative number of edits
    \'ref_length\' (float): The cumulative average reference length
Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase__ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu's TER implementation.

    Restores the `_info`/`_compute` method names the `datasets.Metric` contract
    requires (both were previously named `__a`, and `_compute` had duplicate
    parameter names, which is a SyntaxError).
    """

    def _info(self):
        # sacrebleu only ships the TER class from 1.4.12 onwards.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Score `predictions` against `references` and return score/num_edits/ref_length."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference *position*, so transpose.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 363 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    Uses the factorial number system: dividing k by each factorial (largest
    first) selects the next element; the remainder carries to the next digit.
    Previous revision gave both parameters the same name (a SyntaxError).

    Raises:
        AssertionError: if k is outside [0, n!).
    """
    if n == 1:
        # The main loop below would over-consume `elements` for n == 1.
        assert k == 0, "k out of bounds"
        return [0]

    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: one element per factorial digit, most significant first.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
    # Execute this module's doctests when run as a script.
    import doctest

    doctest.testmod()
| 298 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
a = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for VQDiffusionPipeline built from tiny dummy components.

    Previous revision named every method `__lowercase` (so each definition
    shadowed the last) while test bodies referenced properties such as
    `self.dummy_vqvae` that were consequently never defined; it also shared
    the class name with the slow-test class below. Names restored.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        # Learnable CF-sampling embeddings need a hidden size and sequence length.
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        # Tolerances kept as in the previous revision.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Integration test against the pretrained microsoft/vq-diffusion-ithq checkpoint.

    Method names restored: both were previously `__lowercase`, so the test was
    never discovered and `tearDown` never ran.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy")

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 155 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high+1] in place.

    Previous revision lost the write-back slice assignment (the merged run was
    bound to a throwaway local), and both functions in this pair were named
    `lowercase` with duplicate parameter names (a SyntaxError).
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Write the merged run plus any leftover tail back into the original list.
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort; returns a new sorted list.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)  # don't mutate the caller's list

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # (Previous revision assigned both values to `a` while reading
    # `user_input` / `unsorted`, raising NameError at runtime.)
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
| 155 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for StableDiffusionPanoramaPipeline using tiny dummy components.

    Previous revision inherited from undefined `__UpperCAmelCase` placeholders
    instead of the imported mixins, named every method `__lowercase` (mutual
    shadowing), referenced the undefined `lowerCamelCase_`, and dropped the
    `components["scheduler"] = ...` assignments in the euler/pndm tests.
    """

    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build the smallest component set the pipeline constructor accepts."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # Override: panorama generation is slow, keep batch sizes small.
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """GPU integration tests against the full stable-diffusion-2-base checkpoint.

    Previous revision named every method `__lowercase`, referenced the
    undefined `lowerCamelCase_`, and dropped the `pipe.scheduler = ...` and
    `callback_fn.has_been_called = ...` assignments. Names restored.
    """

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Common call kwargs; `seed` pins the torch.Generator for reproducibility."""
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ])
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base', safety_checker=None)
        # Swap in a K-LMS scheduler configured from the default scheduler's config.
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 355 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-module plumbing. Previous revision bound the structure dict and each
# backend's symbol list to throwaway `lowercase` names, while `_LazyModule`
# received the undefined `_import_structure` — restored here.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

# Register each framework's model module only when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.