class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Insert an element at the rear; raises when the queue is full."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raises on underflow."""
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
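
# A minimal usage sketch (not part of the original snippet): the rear pointer
# wraps around the fixed-size backing array, so freed slots are reused.
queue = CircularQueue(3)
queue.enqueue(10).enqueue(20).enqueue(30)
assert len(queue) == 3
assert queue.dequeue() == 10  # FIFO: the front element leaves first
queue.enqueue(40)  # reuses the slot freed by the dequeue above
assert queue.first() == 20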
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
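
# A condensed sketch of the integration-test flow above (checkpoint name taken
# from the tests; running it needs TF, the vision extras and network access):
#
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   image_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   inputs = image_processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs, training=False).logits  # expected shape: (1, 1000)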
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
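
# Example invocation (hypothetical launcher filename and training-script flags),
# mirroring torch.distributed.launch-style usage:
#
#   python xla_spawn.py --num_cores 8 path/to/train.py --learning_rate 3e-5
#
# Everything after the training script path is forwarded to it through the
# patched sys.argv, plus the injected --tpu_num_cores flag.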
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
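
# Example invocation (hypothetical paths; the checkpoint dir is expected to
# contain checkpoint.pt, dict.txt and bpecodes, as checked above):
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir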
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
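
# A minimal, runnable sketch (assumed values, separate from the class above)
# of the right-padding arithmetic that _pad implements when feature_size == 1:
import numpy as np

required_input = np.array([0.1, 0.2, 0.3], dtype=np.float32)
max_length, padding_value = 6, 0.0
difference = max_length - len(required_input)
attention_mask = np.pad(np.ones(len(required_input), dtype=np.int32), (0, difference))
padded = np.pad(required_input, (0, difference), "constant", constant_values=padding_value)
# padded         -> [0.1, 0.2, 0.3, 0.0, 0.0, 0.0]
# attention_mask -> [1, 1, 1, 0, 0, 0]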
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
        number_of_steps = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
        test_callback_fn.has_been_called = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
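
# A condensed sketch of the fast-test flow above (tiny test checkpoint name
# taken from the class attribute; requires onnxruntime's CPU provider):
#
#   pipe = OnnxStableDiffusionPipeline.from_pretrained(
#       "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
#   )
#   image = pipe(prompt="A painting of a squirrel eating a burger",
#                num_inference_steps=2, output_type="numpy").images[0]  # (128, 128, 3)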
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
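
# Example invocation (hypothetical paths):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin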
def remove_duplicates(key: str) -> str:
    """
    Removes duplicate alphabetic characters in a keyword (a letter is ignored after
    its first appearance).
    >>> remove_duplicates('Hello World!!')
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    """
    Returns a cipher map given a keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    """
    Enciphers a message given a cipher map.
    >>> encipher('Hello World!!', create_cipher_map('Goodbye!!'))
    'CYJJM VMQJB!!'
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    """
    Deciphers a message given a cipher map.
    >>> cipher_map = create_cipher_map('Goodbye!!')
    >>> decipher(encipher('Hello World!!', cipher_map), cipher_map)
    'HELLO WORLD!!'
    """
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """
    Handles user input and prints the result.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
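
# Layout produced by the two helpers above (standard BERT conventions):
#
#   single sequence: [CLS] A [SEP]            token_type_ids: 0 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0 1 ... 1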
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCamelCase : int = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
__lowercase = self.transformer_dir
shutil.copy(
os.path.join(_lowerCAmelCase , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any]=None ) -> str:
"""simple docstring"""
__lowercase = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
__lowercase = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
__lowercase = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
__lowercase = black.format_str(_lowerCAmelCase , mode=_lowerCAmelCase )
__lowercase = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_lowerCAmelCase , """w""" , newline="""\n""" ) as f:
f.write(_lowerCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowerCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowerCAmelCase )
with open(_lowerCAmelCase , """r""" ) as f:
self.assertTrue(f.read() , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _lowerCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _lowerCAmelCase ) , )
# Copy consistency with a really long name
__lowercase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , _lowerCAmelCase , _lowerCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _lowerCAmelCase , overwrite_result=re.sub("""Bert""" , """TestModel""" , _lowerCAmelCase ) , )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
__lowercase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
__lowercase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__lowercase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
__lowercase , __lowercase = check_copies.convert_to_localized_md(
_lowerCAmelCase , _lowerCAmelCase , localized_readme["""format_model_list"""] )
self.assertFalse(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase , __lowercase = check_copies.convert_to_localized_md(
_lowerCAmelCase , _lowerCAmelCase , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_lowerCAmelCase )
__lowercase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
__lowercase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__lowercase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__lowercase , __lowercase = check_copies.convert_to_localized_md(
_lowerCAmelCase , _lowerCAmelCase , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
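# Background for the assertions above (hedged, following the DDPM-style
# "learned_range" parameterization): the predicted variance is mapped through
#   frac = (predicted_variance + 1) / 2
#   log_variance = frac * max_log + (1 - frac) * min_log
# so a predicted_variance of 0.5 interpolates 3/4 of the way toward max_log.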
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def snake_case ( ):
'''simple docstring'''
__lowercase = 10
__lowercase = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
__lowercase = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowerCamelCase ) ),
} , features=lowerCamelCase , )
return dataset
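# The fixture above yields 10 rows shaped like SQuAD-style records; the
# fixtures below serialize the same data to arrow/csv/json/parquet/sqlite so
# loader tests can share one canonical dataset.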
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCamelCase )
return filename
# FILE_CONTENT + files
__UpperCamelCase : int = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
__lowercase = FILE_CONTENT
with open(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
import bz2
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
__lowercase = bytes(lowerCamelCase , """utf-8""" )
with bz2.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
import gzip
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
__lowercase = bytes(lowerCamelCase , """utf-8""" )
with gzip.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
__lowercase = bytes(lowerCamelCase , """utf-8""" )
with lz4.frame.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with py7zr.SevenZipFile(lowerCamelCase , """w""" ) as archive:
archive.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
import tarfile
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
import lzma
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
__lowercase = bytes(lowerCamelCase , """utf-8""" )
with lzma.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
import zipfile
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
__lowercase = bytes(lowerCamelCase , """utf-8""" )
with zstd.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
__lowercase = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
__UpperCamelCase : Union[str, Any] = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def snake_case ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = datasets.Dataset.from_dict(lowerCamelCase )
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlite3.connect(lowerCamelCase ) ) as con:
__lowercase = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
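# Read-back sketch for the sqlite fixture (assumes the schema created above):
# with contextlib.closing(sqlite3.connect(path)) as con:
#     rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()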
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCamelCase , """w""" , newline="""""" ) as f:
__lowercase = csv.DictWriter(lowerCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCamelCase , """w""" , newline="""""" ) as f:
__lowercase = csv.DictWriter(lowerCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
import bz2
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(lowerCamelCase , """wb""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
__lowercase = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowerCamelCase , """wb""" ) as f:
__lowercase = pq.ParquetWriter(lowerCamelCase , schema=lowerCamelCase )
__lowercase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase ) )] for k in DATA[0]} , schema=lowerCamelCase )
writer.write_table(lowerCamelCase )
writer.close()
return path
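# Read-back sketch for the parquet fixture: `pq.read_table(path).to_pydict()`
# should reproduce DATA column-wise, e.g. {"col_1": ["0", "1", "2", "3"], ...}.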
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__lowercase = {"""data""": DATA}
with open(lowerCamelCase , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
__lowercase = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCamelCase , """w""" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
import gzip
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCamelCase , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
import gzip
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCamelCase , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = ["""0""", """1""", """2""", """3"""]
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = ["""0""", """1""", """2""", """3"""]
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = ["""0""", """1""", """2""", """3"""]
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
__lowercase = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def snake_case ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCamelCase , """w""" ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
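# The hidden `.test.txt` file and `.subdir` directory above exist so data-dir
# resolution tests can assert that hidden entries are skipped by default.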
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
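# Usage sketch (hypothetical argument values; the classes above correspond to
# transformers' GlueDataTrainingArguments / GlueDataset, and the dataset yields
# `InputFeatures` consumable by `Trainer`):
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue/MRPC")
# train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)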
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :List[str] = 'camembert'
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any]=3_0522 , _lowerCAmelCase : str=768 , _lowerCAmelCase : List[str]=12 , _lowerCAmelCase : List[Any]=12 , _lowerCAmelCase : int=3072 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : List[str]=512 , _lowerCAmelCase : int=2 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Dict=1e-12 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : Any="absolute" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
class __UpperCamelCase ( _lowerCAmelCase ):
@property
def _a ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
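# The dynamic axes above tell the ONNX exporter which input dimensions may
# vary at runtime (batch size, sequence length, and the choice axis for
# multiple-choice heads); all other axes are baked into the exported graph.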
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
__snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = ['image_processor', 'tokenizer']
__snake_case :Dict = 'OwlViTImageProcessor'
__snake_case :str = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : str , _lowerCAmelCase : Any=None , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowerCAmelCase , )
__lowercase = kwargs.pop("""feature_extractor""" )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self : str , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Dict="max_length" , _lowerCAmelCase : Optional[Any]="np" , **_lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(text[0] , _lowerCAmelCase )):
__lowercase = [self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(text[0] , _lowerCAmelCase ):
__lowercase = []
# Maximum number of queries across batch
__lowercase = max([len(_lowerCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCAmelCase ) != max_num_queries:
__lowercase = t + [""" """] * (max_num_queries - len(_lowerCAmelCase ))
__lowercase = self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
encodings.append(_lowerCAmelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
__lowercase = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__lowercase = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowercase = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__lowercase = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__lowercase = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
__lowercase = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowercase = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
__lowercase = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
__lowercase = BatchEncoding()
__lowercase = input_ids
__lowercase = attention_mask
if query_images is not None:
__lowercase = BatchEncoding()
__lowercase = self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ).pixel_values
__lowercase = query_pixel_values
if images is not None:
__lowercase = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def _a ( self : Optional[int] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
return self.image_processor.post_process(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : int , *_lowerCAmelCase : Dict , **_lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : str , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : str ) -> List[Any]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Optional[int] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def _a ( self : int ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowerCAmelCase , )
return self.image_processor_class
@property
def _a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowerCAmelCase , )
return self.image_processor
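# A minimal, self-contained sketch (not part of the library) of the query
# padding performed above: every image's list of text queries is right-padded
# with " " so all batch samples share the same number of queries before
# tokenization. The helper name `pad_text_queries` is illustrative only.
def pad_text_queries(text):
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]

assert pad_text_queries([["a cat"], ["a dog", "a bird"]]) == [["a cat", " "], ["a dog", "a bird"]]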
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
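# Illustrative restatement of the predicate above with unobfuscated names: a
# polygon with the given side lengths can close iff its longest side is
# strictly shorter than the sum of the remaining sides. `can_form_polygon` is
# a name chosen for this sketch, not the upstream identifier.
def can_form_polygon(nums):
    sides = sorted(nums)
    return sides[-1] < sum(sides[:-1])

assert can_form_polygon([3, 4, 5]) is True   # valid triangle: 5 < 3 + 4
assert can_form_polygon([1, 1, 3]) is False  # degenerate: 3 >= 1 + 1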
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = int(number**0.5 )
return number == sq * sq
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__lowercase = x_den * y_den * z_den
__lowercase = gcd(lowerCamelCase , lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def snake_case ( lowerCamelCase = 35 ):
'''simple docstring'''
__lowercase = set()
__lowercase = 42
__lowercase = Fraction(0 )
__lowercase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__lowercase = x_num * y_den + x_den * y_num
__lowercase = x_den * y_den
__lowercase = gcd(lowerCamelCase , lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
unique_s.add(lowerCamelCase )
# n=2
__lowercase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__lowercase = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase ) and is_sq(lowerCamelCase ):
__lowercase = int(sqrt(lowerCamelCase ) )
__lowercase = int(sqrt(lowerCamelCase ) )
__lowercase = gcd(lowerCamelCase , lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
unique_s.add(lowerCamelCase )
# n=-1
__lowercase = x_num * y_num
__lowercase = x_den * y_num + x_num * y_den
__lowercase = gcd(lowerCamelCase , lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
unique_s.add(lowerCamelCase )
# n=-2
__lowercase = x_num * x_num * y_num * y_num
__lowercase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase ) and is_sq(lowerCamelCase ):
__lowercase = int(sqrt(lowerCamelCase ) )
__lowercase = int(sqrt(lowerCamelCase ) )
__lowercase = gcd(lowerCamelCase , lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__lowercase = add_three(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
unique_s.add(lowerCamelCase )
for num, den in unique_s:
total += Fraction(lowerCamelCase , lowerCamelCase )
return total.denominator + total.numerator
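# Standalone sketch of the reduction performed by `add_three` above (parameter
# names here are illustrative): sum the three fractions over a common
# denominator and divide by the gcd, e.g. 1/2 + 1/3 + 1/6 reduces to 1/1.
def _add_three_demo(xn, xd, yn, yd, zn, zd):
    top = xn * yd * zd + yn * xd * zd + zn * xd * yd
    bottom = xd * yd * zd
    hcf = gcd(top, bottom)  # `gcd` is imported at the top of this module
    return top // hcf, bottom // hcf

assert _add_three_demo(1, 2, 1, 3, 1, 6) == (1, 1)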
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
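# Illustrative restatement of the dynamic programme above with unobfuscated
# names: at each element, either extend the best sum that excluded the
# previous element or carry the running maximum forward. The sketch name
# `max_non_adjacent_sum` is not the upstream identifier.
def max_non_adjacent_sum(nums):
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)

assert max_non_adjacent_sum([1, 2, 4, 7]) == 9  # pick 2 and 7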
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = 'decision_transformer'
__snake_case :List[str] = ['past_key_values']
__snake_case :str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[str]=17 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Any=128 , _lowerCAmelCase : List[str]=4096 , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : List[str]=1024 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any="relu" , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : List[str]=1e-5 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : List[str]=5_0256 , _lowerCAmelCase : Tuple=5_0256 , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : List[Any]=False , **_lowerCAmelCase : str , ) -> Tuple:
"""simple docstring"""
__lowercase = state_dim
__lowercase = act_dim
__lowercase = hidden_size
__lowercase = max_ep_len
__lowercase = action_tanh
__lowercase = vocab_size
__lowercase = n_positions
__lowercase = n_layer
__lowercase = n_head
__lowercase = n_inner
__lowercase = activation_function
__lowercase = resid_pdrop
__lowercase = embd_pdrop
__lowercase = attn_pdrop
__lowercase = layer_norm_epsilon
__lowercase = initializer_range
__lowercase = scale_attn_weights
__lowercase = use_cache
__lowercase = scale_attn_by_inverse_layer_idx
__lowercase = reorder_and_upcast_attn
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
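# Hedged usage sketch: this configuration is published in transformers as
# `DecisionTransformerConfig`, with keyword names matching the attributes set
# above; `attribute_map` is what lets `num_attention_heads` resolve to
# `n_head`. Treat the exact aliasing behaviour as an assumption of this sketch.
from transformers import DecisionTransformerConfig

demo_config = DecisionTransformerConfig(state_dim=17, act_dim=4, n_head=2)
assert demo_config.num_attention_heads == demo_config.n_head == 2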
| 53
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
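# A minimal standalone sketch of the state-dict renaming above: keys under
# "roberta." are moved under "roberta_prelayernorm." and the unused LayerNorm
# weights inside the self-attention are dropped. The dict values are toy
# placeholders, not real tensors.
def _rename_keys_demo(state_dict):
    out = {}
    for key, value in state_dict.items():
        if key.startswith("roberta."):
            key = "roberta_prelayernorm." + key[len("roberta.") :]
        if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
            continue
        out[key] = value
    return out

demo = {
    "roberta.embeddings.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,
}
assert _rename_keys_demo(demo) == {"roberta_prelayernorm.embeddings.weight": 1}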
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__UpperCamelCase : Tuple = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
def __init__( self : Union[str, Any] , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : List[Any]=6.0 , _lowerCAmelCase : int=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple="fp4" , _lowerCAmelCase : List[str]=False , **_lowerCAmelCase : Any , ) -> Dict:
"""simple docstring"""
__lowercase = load_in_abit
__lowercase = load_in_abit
__lowercase = llm_inta_threshold
__lowercase = llm_inta_skip_modules
__lowercase = llm_inta_enable_fpaa_cpu_offload
__lowercase = llm_inta_has_fpaa_weight
__lowercase = bnb_abit_quant_type
__lowercase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__lowercase = torch.floataa
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = getattr(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , torch.dtype ):
__lowercase = bnb_abit_compute_dtype
else:
raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
self.post_init()
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , _lowerCAmelCase ):
raise ValueError("""llm_int8_threshold must be a float""" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _lowerCAmelCase ):
raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _lowerCAmelCase ):
raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
if not isinstance(self.llm_inta_has_fpaa_weight , _lowerCAmelCase ):
raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
if not isinstance(self.bnb_abit_quant_type , _lowerCAmelCase ):
raise ValueError("""bnb_4bit_quant_type must be a string""" )
if not isinstance(self.bnb_abit_use_double_quant , _lowerCAmelCase ):
raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
if self.load_in_abit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
"""0.39.0""" ):
raise ValueError(
"""4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.load_in_abit or self.load_in_abit
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , **_lowerCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = cls(**_lowerCAmelCase )
__lowercase = []
for key, value in kwargs.items():
if hasattr(_lowerCAmelCase , _lowerCAmelCase ):
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
to_remove.append(_lowerCAmelCase )
for key in to_remove:
kwargs.pop(_lowerCAmelCase , _lowerCAmelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _a ( self : Union[str, Any] , _lowerCAmelCase : Union[str, os.PathLike] ) -> Tuple:
"""simple docstring"""
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
__lowercase = self.to_dict()
__lowercase = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + """\n"""
writer.write(_lowerCAmelCase )
def _a ( self : List[str] ) -> Dict[str, Any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
return output
def __repr__( self : int ) -> List[str]:
"""simple docstring"""
return F'{self.__class__.__name__} {self.to_json_string()}'
def _a ( self : List[Any] , _lowerCAmelCase : bool = True ) -> str:
"""simple docstring"""
if use_diff is True:
__lowercase = self.to_diff_dict()
else:
__lowercase = self.to_dict()
return json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
def _a ( self : str ) -> Dict[str, Any]:
"""simple docstring"""
__lowercase = self.to_dict()
# get the default config dict
__lowercase = BitsAndBytesConfig().to_dict()
__lowercase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__lowercase = value
return serializable_config_dict
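# Hedged usage sketch: this class is published in transformers as
# `BitsAndBytesConfig`. Actually loading a quantized model additionally needs
# `bitsandbytes` and a CUDA device, so only the config round-trip is shown;
# the string compute dtype is resolved via getattr(torch, ...) as above.
from transformers import BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
)
print(quant_config.to_json_string())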
| 53
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
__lowercase = ksize + 1
__lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
# compute each kernel value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degrees to radians
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
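# Standalone numpy-only sketch of the kernel construction above with
# unobfuscated names: each kernel value is a Gaussian envelope modulated by a
# cosine carrier along the rotated x-axis. `gabor_kernel_demo` is a sketch
# name, not the upstream identifier.
def gabor_kernel_demo(ksize, sigma, theta_deg, lambd, gamma, psi):
    ksize = ksize + 1 if ksize % 2 == 0 else ksize  # force an odd kernel size
    theta = np.deg2rad(theta_deg)
    ys, xs = np.mgrid[0:ksize, 0:ksize] - ksize // 2  # distances from center
    x_r = np.cos(theta) * xs + np.sin(theta) * ys  # rotated coordinates
    y_r = -np.sin(theta) * xs + np.cos(theta) * ys
    return np.exp(-(x_r**2 + gamma**2 * y_r**2) / (2 * sigma**2)) * np.cos(
        2 * np.pi * x_r / lambd + psi
    )

assert gabor_kernel_demo(10, 8, 0, 10, 0, 0).shape == (11, 11)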
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
__UpperCamelCase : List[str] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 53
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( _lowerCAmelCase ):
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """num_heads""" ) )
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : Optional[int]=[16, 48, 96] , _lowerCAmelCase : int=[1, 3, 6] , _lowerCAmelCase : int=[1, 2, 10] , _lowerCAmelCase : Optional[Any]=[7, 3, 3] , _lowerCAmelCase : Union[str, Any]=[4, 2, 2] , _lowerCAmelCase : Optional[int]=[2, 1, 1] , _lowerCAmelCase : List[Any]=[2, 2, 2] , _lowerCAmelCase : Optional[Any]=[False, False, True] , _lowerCAmelCase : Optional[Any]=[0.0, 0.0, 0.0] , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Union[str, Any]=1e-12 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Tuple=2 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_sizes
__lowercase = patch_stride
__lowercase = patch_padding
__lowercase = is_training
__lowercase = use_labels
__lowercase = num_labels
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = num_heads
__lowercase = stride_kv
__lowercase = depth
__lowercase = cls_token
__lowercase = attention_drop_rate
__lowercase = initializer_range
__lowercase = layer_norm_eps
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
# create a random int32 tensor of given shape
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ) -> str:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _a ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
__lowercase = TFCvtModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
__lowercase = (self.image_size, self.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _a ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFCvtForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Optional[Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__snake_case :Optional[Any] = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
__snake_case :str = False
__snake_case :int = False
__snake_case :List[Any] = False
__snake_case :str = False
__snake_case :str = False
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = TFCvtModelTester(self )
__lowercase = TFCvtConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def _a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_lowerCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : int ) -> int:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = len(self.model_tester.depth )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFCvtModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([0.9_285, 0.9_015, -0.3_150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCAmelCase , atol=1e-4 ) )
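# Hedged inference sketch mirroring the integration test above; the checkpoint
# string "microsoft/cvt-13" is assumed to be the first entry of the archive
# list used by the test, and the image path comes from the test fixtures.
from PIL import Image
import tensorflow as tf
from transformers import AutoImageProcessor, TFCvtForImageClassification

cvt_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
cvt_model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
demo_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
demo_inputs = cvt_processor(images=demo_image, return_tensors="tf")
demo_logits = cvt_model(**demo_inputs).logits  # shape (1, 1000), as the test asserts
print(int(tf.math.argmax(demo_logits, axis=-1)[0]))  # predicted ImageNet class id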
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
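# Simplified illustrative run of the parsing logic above (byte decoding and
# the "warnings summary (final)" skip are omitted): indented lines belong to
# the current warning's body, a non-indented line flushes the buffer, and
# only warnings matching a target category are kept.
def _extract_demo(lines, targets):
    selected, buffer = set(), []
    for line in lines + [""]:  # trailing sentinel flushes the last warning
        if not line.startswith(" "):
            if buffer:
                warning = "\n".join(buffer)
                if any(f": {t}: " in warning for t in targets):
                    selected.add(warning)
                buffer.clear()
        else:
            buffer.append(line.strip())
    return selected

demo_lines = ["header", "  tests/test_a.py:1: DeprecationWarning: old API", "next header"]
assert _extract_demo(demo_lines, ["DeprecationWarning"]) == {"tests/test_a.py:1: DeprecationWarning: old API"}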
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 1
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :int = BioGptTokenizer
__snake_case :List[Any] = False
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__lowercase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def _a ( self : List[str] , _lowerCAmelCase : Any ) -> Dict:
"""simple docstring"""
__lowercase = """lower newer"""
__lowercase = """lower newer"""
return input_text, output_text
def _a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = BioGptTokenizer(self.vocab_file , self.merges_file )
__lowercase = """lower"""
__lowercase = ["""low""", """er</w>"""]
__lowercase = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = tokens + ["""<unk>"""]
__lowercase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
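# Hedged usage sketch mirroring the slow test above: the tokenizer class and
# the "microsoft/biogpt" checkpoint are taken from the test itself; running
# this requires network access to download the checkpoint.
biogpt_tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
assert biogpt_tokenizer("sequence builders")["input_ids"][0] == 2  # </s> is prepended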
| 53
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
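# Minimal standalone sketch of the lazy-import pattern used above: attribute
# access triggers the real import, so importing the package itself stays
# cheap. The class below is illustrative, not the transformers internals.
import importlib
import types

class LazyModuleDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)

lazy_os = LazyModuleDemo("os", {"path": ["join"]})
assert lazy_os.join("a", "b") == importlib.import_module("os.path").join("a", "b")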
| 53
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any=13 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : List[str]=[10, 20, 30, 40] , _lowerCAmelCase : Tuple=[2, 2, 3, 2] , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[int]=37 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Any=10 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Optional[int]=["stage2", "stage3", "stage4"] , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[Any]=None , ) -> List[str]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = num_stages
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = out_features
__lowercase = num_labels
__lowercase = scope
__lowercase = num_stages
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Any ) -> int:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _a ( self : Dict ) -> str:
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def _a ( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = UperNetForSemanticSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _a ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__snake_case :List[str] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
__snake_case :Any = False
__snake_case :Any = False
__snake_case :List[str] = False
__snake_case :Optional[int] = False
__snake_case :Any = False
__snake_case :List[str] = False
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = UperNetModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _a ( self : int ) -> Any:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ):
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = _config_zero_init(_lowerCAmelCase )
__lowercase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@slow
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = UperNetForSemanticSegmentation.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def snake_case ( ):
'''simple docstring'''
__lowercase = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
__lowercase = Image.open(lowerCamelCase ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
__lowercase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_lowerCAmelCase )
__lowercase = prepare_img()
__lowercase = processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
__lowercase = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
__lowercase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_lowerCAmelCase )
__lowercase = prepare_img()
__lowercase = processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
__lowercase = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
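# Hedged end-to-end sketch mirroring the slow tests above; the checkpoint and
# fixture names are taken verbatim from the tests, and network access is
# required to download them. All imports are already available in this module.
upernet_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
upernet_model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
demo_path = hf_hub_download(
    repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
)
demo_inputs = upernet_processor(images=Image.open(demo_path).convert("RGB"), return_tensors="pt")
with torch.no_grad():
    demo_logits = upernet_model(**demo_inputs).logits  # (1, num_labels, 512, 512)
demo_seg_map = demo_logits.argmax(dim=1)[0]  # per-pixel ADE20K class ids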
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
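# Standalone restatement of the bouncy test above: a number is bouncy when
# its digit string is neither non-decreasing nor non-increasing.
def _is_bouncy_demo(n):
    digits = str(n)
    return sorted(digits) != list(digits) and sorted(digits, reverse=True) != list(digits)

assert _is_bouncy_demo(155349) and not _is_bouncy_demo(134468) and not _is_bouncy_demo(66420)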
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 53
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __UpperCamelCase :
def __init__( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=13 , _lowerCAmelCase : Dict=10 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Any=True , _lowerCAmelCase : int=True , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : Any=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : int=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Union[str, Any]=10 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=0.9 , _lowerCAmelCase : List[Any]=None , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = tubelet_size
__lowercase = num_frames
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__lowercase = (image_size // patch_size) ** 2
__lowercase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__lowercase = int(mask_ratio * self.seq_length )
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = VideoMAEModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = VideoMAEForPreTraining(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowercase = torch.ones((self.num_masks,) )
__lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__lowercase = mask.expand(self.batch_size , -1 ).bool()
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# model only returns predictions for masked patches
__lowercase = mask.sum().item()
__lowercase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :str = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__snake_case :Any = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__snake_case :List[Any] = False
__snake_case :List[Any] = False
__snake_case :List[Any] = False
__snake_case :Union[str, Any] = False
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = VideoMAEModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple=False ) -> int:
"""simple docstring"""
__lowercase = copy.deepcopy(_lowerCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowercase = torch.ones((self.model_tester.num_masks,) )
__lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__lowercase = mask.expand(self.model_tester.batch_size , -1 ).bool()
__lowercase = bool_masked_pos.to(_lowerCAmelCase )
if return_labels:
if model_class in [
*get_values(_lowerCAmelCase ),
]:
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
@slow
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = VideoMAEModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowercase = len(_lowerCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) )
__lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def snake_case ( ):
'''simple docstring'''
__lowercase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowercase = np.load(lowerCamelCase )
return list(lowerCamelCase )
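# Hedged sketch: a synthetic clip with the same interface as the helper above
# (a list of H x W x 3 uint8 frames). The frame count and size here are
# illustrative assumptions, not properties of the real test video.
def prepare_fake_video(num_frames=16, height=360, width=640):
    rng = np.random.default_rng(0)
    frames = rng.integers(0, 256, size=(num_frames, height, width, 3), dtype=np.uint8)
    return list(frames)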
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
_lowerCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(_lowerCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# add boolean mask, indicating which patches to mask
__lowercase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowercase = torch.load(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = torch.Size([1, 1408, 1536] )
__lowercase = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=_lowerCAmelCase )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__lowercase = torch.tensor([0.5_142] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__lowercase = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=_lowerCAmelCase ).to(
_lowerCAmelCase )
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
        __lowercase = torch.tensor([0.6_469] , device=_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) )
| 53
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
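# Minimal sketch of the lazy-import pattern wired up above (illustrative only,
# not the real transformers._LazyModule): the module object imports each
# submodule the first time one of its exported names is accessed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value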
| 53
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCamelCase : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
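# Sketch of the data-parallel plumbing exercised above (illustrative): replicate()
# copies the pipeline parameters to every local device, while shard() adds a
# leading device axis so a pmapped call can split the batch. For a single array
# the shard step amounts to:
def shard_sketch(batch):
    n = jax.device_count()
    return batch.reshape((n, batch.shape[0] // n) + batch.shape[1:])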
| 53
| 1
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__UpperCamelCase : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return max(metric_fn(lowerCamelCase , lowerCamelCase ) for gt in ground_truths )
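# Toy illustration of the max-over-references scoring above (the _toy_em helper
# is hypothetical; the real exact_match_score also normalizes text first):
def _toy_em(prediction, reference):
    return float(prediction.strip().lower() == reference.strip().lower())
# max(_toy_em("paris", gt) for gt in ["Paris", "Lyon"]) == 1.0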
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [line.strip() for line in open(lowerCamelCase , """r""" ).readlines()]
__lowercase = []
if args.gold_data_mode == "qa":
__lowercase = pd.read_csv(lowerCamelCase , sep="""\t""" , header=lowerCamelCase )
for answer_list in data[1]:
__lowercase = ast.literal_eval(lowerCamelCase )
answers.append(lowerCamelCase )
else:
__lowercase = [line.strip() for line in open(lowerCamelCase , """r""" ).readlines()]
__lowercase = [[reference] for reference in references]
__lowercase = __lowercase = __lowercase = 0
for prediction, ground_truths in zip(lowerCamelCase , lowerCamelCase ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
fa += metric_max_over_ground_truths(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__lowercase = 100.0 * em / total
__lowercase = 100.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = args.k
__lowercase = [line.strip() for line in open(lowerCamelCase , """r""" ).readlines()]
__lowercase = [line.strip() for line in open(lowerCamelCase , """r""" ).readlines()]
__lowercase = __lowercase = 0
for hypo, reference in zip(lowerCamelCase , lowerCamelCase ):
__lowercase = set(hypo.split("""\t""" )[:k] )
__lowercase = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__lowercase = 100.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
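# Worked example for the precision@k above: with k = 2, a hypothesis line
# "a\tb\tc" yields {"a", "b"} and a reference line "b\td" yields {"b", "d"};
# the overlap {"b"} contributes 1 / 2 = 0.5 for that pair.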
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
def strip_title(lowerCamelCase ):
if title.startswith("""\"""" ):
__lowercase = title[1:]
if title.endswith("""\"""" ):
__lowercase = title[:-1]
return title
__lowercase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors="""pt""" , padding=lowerCamelCase , truncation=lowerCamelCase , )["""input_ids"""].to(args.device )
__lowercase = rag_model.rag.question_encoder(lowerCamelCase )
__lowercase = question_enc_outputs[0]
__lowercase = rag_model.retriever(
lowerCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
__lowercase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__lowercase = []
for docs in all_docs:
__lowercase = [strip_title(lowerCamelCase ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(lowerCamelCase ) )
return provenance_strings
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with torch.no_grad():
__lowercase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors="""pt""" , padding=lowerCamelCase , truncation=lowerCamelCase )
__lowercase = inputs_dict.input_ids.to(args.device )
__lowercase = inputs_dict.attention_mask.to(args.device )
__lowercase = rag_model.generate( # rag_model overwrites generate
lowerCamelCase , attention_mask=lowerCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__lowercase = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
if args.print_predictions:
for q, a in zip(lowerCamelCase , lowerCamelCase ):
logger.info("""Q: {} - A: {}""".format(lowerCamelCase , lowerCamelCase ) )
return answers
def snake_case ( ):
'''simple docstring'''
__lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=lowerCamelCase , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=lowerCamelCase , choices=["""exact""", """compressed""", """legacy"""] , type=lowerCamelCase , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=lowerCamelCase , type=lowerCamelCase , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=lowerCamelCase , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=lowerCamelCase , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=lowerCamelCase , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=lowerCamelCase , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=lowerCamelCase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=lowerCamelCase , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=lowerCamelCase , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=lowerCamelCase , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=lowerCamelCase , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
__lowercase = parser.parse_args()
__lowercase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = {}
if args.model_type is None:
__lowercase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
__lowercase = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
__lowercase = args.n_docs
if args.index_name is not None:
__lowercase = args.index_name
if args.index_path is not None:
__lowercase = args.index_path
else:
__lowercase = BartForConditionalGeneration
__lowercase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , lowerCamelCase )
__lowercase = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
__lowercase = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(lowerCamelCase ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
__lowercase = RagRetriever.from_pretrained(lowerCamelCase , **lowerCamelCase )
__lowercase = model_class.from_pretrained(lowerCamelCase , retriever=lowerCamelCase , **lowerCamelCase )
model.retriever.init_retrieval()
else:
__lowercase = model_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
__lowercase = []
for line in tqdm(lowerCamelCase ):
questions.append(line.strip() )
if len(lowerCamelCase ) == args.eval_batch_size:
__lowercase = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write("""\n""".join(lowerCamelCase ) + """\n""" )
preds_file.flush()
__lowercase = []
if len(lowerCamelCase ) > 0:
__lowercase = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write("""\n""".join(lowerCamelCase ) )
preds_file.flush()
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__UpperCamelCase : Dict = get_args()
main(args)
| 53
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
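# Self-contained sketch of the update-by-reinsertion trick the queue above
# relies on: a plain binary heap cannot re-key an entry in place, so entries
# are popped into a buffer until the target is found, re-keyed, and pushed back.
# (Assumes `item` is present in `heap`, as the class above does.)
def _update_priority_sketch(heap, item, new_priority):
    buffer = []
    priority, current = heapq.heappop(heap)
    while current != item:
        buffer.append((priority, current))
        priority, current = heapq.heappop(heap)
    buffer.append((new_priority, item))
    for entry in buffer:
        heapq.heappush(heap, entry)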
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
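# Worked example: with Wa = 1, g_function = {(0, 0): 0} and the Manhattan
# heuristic, key((0, 0), i, (19, 19), g_function) = 0 + 1 * (19 + 19) = 38.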
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 53
| 1
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__UpperCamelCase : Optional[int] = logging.getLogger(__name__)
__UpperCamelCase : Optional[int] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
            'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__snake_case :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def _a ( self : List[str] ) -> Any:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__snake_case :Optional[str] = field(default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__snake_case :Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
__snake_case :Optional[int] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
__snake_case :Optional[int] = field(
default=_lowerCAmelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def _a ( self : str ) -> int:
"""simple docstring"""
if self.train_file is not None:
__lowercase = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__lowercase = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
__lowercase = [json.loads(lowerCamelCase ) for line in f.read().splitlines() if (len(lowerCamelCase ) > 0 and not line.isspace())]
assert len(lowerCamelCase ) == len(lowerCamelCase )
__lowercase = {c: dataset[c] for c in dataset.column_names}
__lowercase = refs
return Dataset.from_dict(lowerCamelCase )
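# Toy illustration (hypothetical data): a tokenized dataset whose rows look like
# {"input_ids": [...]} gains a parallel "chinese_ref" column, one JSON list per
# input line, aligned purely by index -- hence the length assertion above.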
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowercase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
__lowercase = {}
if data_args.train_file is not None:
__lowercase = data_args.train_file
if data_args.validation_file is not None:
__lowercase = data_args.validation_file
__lowercase = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
__lowercase = """text"""
__lowercase = load_dataset(lowerCamelCase , data_files=lowerCamelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , **lowerCamelCase )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
__lowercase = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCamelCase )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCamelCase )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
__lowercase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelForMaskedLM.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__lowercase = datasets["""train"""].column_names
else:
__lowercase = datasets["""validation"""].column_names
__lowercase = """text""" if """text""" in column_names else column_names[0]
__lowercase = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(lowerCamelCase ):
# Remove empty lines
__lowercase = [line for line in examples["""text"""] if len(lowerCamelCase ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=data_args.max_seq_length )
__lowercase = datasets.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__lowercase = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__lowercase = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__lowercase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__lowercase = False
# Data collator
# This one will take care of randomly masking the tokens.
__lowercase = DataCollatorForWholeWordMask(tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__lowercase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__lowercase = model_args.model_name_or_path
else:
__lowercase = None
__lowercase = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__lowercase = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = perplexity
__lowercase = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 53
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
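# Layout assumed by the split above (sketch): the fused projection of shape
# (3 * dim, dim) stacks the query, key and value blocks row-wise, so slicing
# [:dim], [dim:2 * dim] and [-dim:] recovers them in that order, e.g.
#     fused = torch.arange(3 * 4 * 4).reshape(12, 4)
#     q, k, v = fused[:4], fused[4:8], fused[-4:]
#     assert torch.equal(torch.cat([q, k, v]), fused)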
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="""pt""" )
    outputs = model(**inputs )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53
| 1
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : str , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any] ) -> None:
"""simple docstring"""
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" , FutureWarning , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 53
|
from math import sqrt
def is_prime( number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
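# A few illustrative checks of the 6k +/- 1 trial division above:
#     is_prime(29) -> True    (only candidate divisors 5 and 7 are tested; neither divides 29)
#     is_prime(25) -> False   (25 % 5 == 0)
#     is_prime(1)  -> False   (caught by the number < 2 branch)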
def solution( nth = 10_001 ):
'''simple docstring'''
    count = 0
    number = 1
while count != nth and number < 3:
number += 1
        if is_prime(number ):
count += 1
while count != nth:
number += 2
        if is_prime(number ):
count += 1
return number
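# Illustrative usage: solution(6) returns 13 (the sixth prime), and the default
# solution() returns 104_743, the 10_001st prime (Project Euler problem 7).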
if __name__ == "__main__":
print(F'''{solution() = }''')
| 53
| 1
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case :Optional[Any] = False
__snake_case :Dict = False
def _a ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=False ) -> List[Any]:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
                inputs_dict["""labels"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : int=99 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Tuple=32 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : int=37 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : List[str]=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : str=None , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = embedding_size
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> Dict:
"""simple docstring"""
__lowercase = TFMobileBertModel(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
__lowercase = [input_ids, input_mask]
__lowercase = model(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFMobileBertForPreTraining(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFMobileBertModelTest.TFMobileBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def _a ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
__lowercase = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_lowerCAmelCase )[0]
__lowercase = [1, 6, 3_0522]
self.assertEqual(output.shape , _lowerCAmelCase )
__lowercase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 )
| 53
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x ):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
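# Illustrative behaviour of the helper above: iterables pass through unchanged,
# while scalars are duplicated into a pair, e.g. to_atuple(224) -> (224, 224)
# and to_atuple((224, 224)) -> (224, 224).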
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 1
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
    def __init__( self , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ):
        """simple docstring"""
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        """simple docstring"""
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
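# Note (added for clarity): ``get_extended_attention_mask`` is inherited from
# ModuleUtilsMixin. It broadcasts a (batch, seq_len) padding mask of 1s and 0s to
# shape (batch, 1, 1, seq_len) and maps 1 -> 0 and 0 -> a large negative value,
# so the result can simply be added to the attention scores inside each TaBlock.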
| 53
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__( self , list_of_points : list[tuple[float, float]] ) -> None:
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size : float = 0.01 ) -> None:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x_coordinates , y_coordinates , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
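# Illustrative values for a degree-1 curve: with control points [(1, 2), (3, 5)],
# basis_function(0.5) evaluates to [0.5, 0.5] and bezier_curve_function(0.5)
# returns the midpoint (2.0, 3.5).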
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 53
| 1
|
from __future__ import annotations
def snake_case ( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ):
'''simple docstring'''
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
if not case_sensitive:
        ciphertext = ciphertext.lower()
# Chi squared statistic values
    chi_squared_statistic_values = {}
# cycle through all of the shifts
for shift in range(len(lowerCamelCase ) ):
__lowercase = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected number of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
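# Illustrative usage (hedged: the recovered shift depends on the text's letter
# statistics, so very short inputs may not decode as expected):
#     shift, chi_squared, decoded = snake_case("""uryyb jbeyq""")
# "uryyb jbeyq" is the ROT13 encoding of "hello world"; the most likely shift
# should typically come out as 13, decoding back to "hello world".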
| 53
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
| 1
|
class SubArray:
    def __init__( self , arr ) -> None:
        """simple docstring"""
        # split the comma-separated input string into a list of number strings
        self.array = arr.split(""",""" )
    def solve_sub_array( self ) -> int:
        """simple docstring"""
        # Kadane-style DP: sum_value[i] holds the best sum of a subarray ending at i,
        # rear[i] holds the best sum seen among the first i + 1 elements.
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
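# Illustrative usage: SubArray("""1,2,-5,2,3""").solve_sub_array() returns 5,
# the sum of the best contiguous subarray [2, 3].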
if __name__ == "__main__":
    whole_array = input("""please input some numbers:""")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("""the result is:""" , re)
| 53
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        """simple docstring"""
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
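# Effect of the key rewriting above on a toy vocabulary (entries are made up): BPE
# continuation markers "@@" are stripped and word-final tokens gain "</w>", e.g.
#   {"wor@@": 5, "ld": 6}  ->  {"wor": 5, "ld</w>": 6}
# while the four special tokens <s>, <pad>, </s>, <unk> come through unchanged.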
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
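# Example invocation of this conversion script (the script file name and paths are
# illustrative):
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt/checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir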
| 53
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = 'xlm'
__snake_case :Any = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : str , _lowerCAmelCase : Tuple=3_0145 , _lowerCAmelCase : Optional[Any]=2048 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Any=False , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[Any]=512 , _lowerCAmelCase : Tuple=2048**-0.5 , _lowerCAmelCase : int=1e-12 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : int=0 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[str]="first" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=True , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=5 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Union[str, Any]=0 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[Any]=0 , **_lowerCAmelCase : str , ) -> Dict:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = emb_dim
__lowercase = n_layers
__lowercase = n_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = gelu_activation
__lowercase = sinusoidal_embeddings
__lowercase = causal
__lowercase = asm
__lowercase = n_langs
__lowercase = use_lang_emb
__lowercase = layer_norm_eps
__lowercase = bos_index
__lowercase = eos_index
__lowercase = pad_index
__lowercase = unk_index
__lowercase = mask_index
__lowercase = is_encoder
__lowercase = max_position_embeddings
__lowercase = embed_init_std
__lowercase = init_std
__lowercase = summary_type
__lowercase = summary_use_proj
__lowercase = summary_activation
__lowercase = summary_proj_to_labels
__lowercase = summary_first_dropout
__lowercase = start_n_top
__lowercase = end_n_top
__lowercase = mask_token_id
__lowercase = lang_id
if "n_words" in kwargs:
__lowercase = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
@property
def _a ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
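# Minimal usage sketch for the config above (upstream this class is `XLMConfig`; the
# values are illustrative), showing the attribute_map alias in action:
# from transformers import XLMConfig
# config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
# assert config.hidden_size == config.emb_dim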
| 53
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
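# The scheduler-swap pattern the tests above exercise, reduced to its core shape
# (a sketch mirroring the tests, not a definitive recipe):
# pipe = OnnxStableDiffusionPipeline.from_pretrained(
#     "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#     provider="CPUExecutionProvider")
# pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
# image = pipe(prompt="A painting of a squirrel eating a burger",
#              num_inference_steps=2, output_type="numpy").images[0]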
| 53
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__UpperCamelCase : List[Any] = None
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Dict = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Dict = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
__UpperCamelCase : Optional[int] = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
__UpperCamelCase : List[Any] = """▁"""
# Segments (not really needed)
__UpperCamelCase : Dict = 0
__UpperCamelCase : str = 1
__UpperCamelCase : Union[str, Any] = 2
__UpperCamelCase : str = 3
__UpperCamelCase : List[str] = 4
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[Any] = VOCAB_FILES_NAMES
__snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :str = 'left'
__snake_case :Optional[int] = XLNetTokenizer
def __init__( self : Dict , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Any=False , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[Any]="<s>" , _lowerCAmelCase : Any="</s>" , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Tuple="<sep>" , _lowerCAmelCase : Tuple="<pad>" , _lowerCAmelCase : Optional[Any]="<cls>" , _lowerCAmelCase : List[str]="<mask>" , _lowerCAmelCase : int=["<eop>", "<eod>"] , **_lowerCAmelCase : Tuple , ) -> int:
"""simple docstring"""
__lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
vocab_file=_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def _a ( self : Optional[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self : str , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
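# Layout produced by the special-token builders above (XLNet appends <sep>/<cls>):
#   single sequence:  A ... <sep> <cls>                token_type_ids: [0 ... 0, 2]
#   sequence pair:    A ... <sep> B ... <sep> <cls>    token_type_ids: [0 ... 0, 1 ... 1, 2]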
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
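# Worked example for the cipher above (key and message are illustrative):
# cipher_map = create_cipher_map("HELLO")            # the key first dedupes to "HELO"
# secret = encipher("ATTACK AT DAWN", cipher_map)
# assert decipher(secret, cipher_map) == "ATTACK AT DAWN"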
| 53
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :List[str] = ['image_processor', 'tokenizer']
__snake_case :List[Any] = 'BlipImageProcessor'
__snake_case :Tuple = 'AutoTokenizer'
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
__lowercase = False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = self.image_processor
def __call__( self : List[str] , _lowerCAmelCase : ImageInput = None , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Tuple , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
__lowercase = self.tokenizer
__lowercase = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
# add pixel_values
__lowercase = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
if text is not None:
__lowercase = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
else:
__lowercase = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def _a ( self : Optional[int] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
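# Typical call pattern for the processor above (checkpoint id is illustrative):
# processor = AutoProcessor.from_pretrained("some/blip-style-checkpoint")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")
# `inputs` then carries both `pixel_values` and the tokenizer outputs.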
| 53
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
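# The device-aware seeding used in `get_dummy_inputs` above, as a standalone helper
# (a sketch: MPS lacks per-device generators here, hence the global-seed fallback):
def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)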
| 53
| 1
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
__lowercase = 0
while number:
        # Clearing the lowest set bit on each pass (Brian Kernighan's trick) means
        # the loop runs once per set bit rather than once per bit position, so it
        # never needs to scan all 32 positions.
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
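# Trace of the `number &= number - 1` trick above for number = 0b1011 (= 11):
#   0b1011 & 0b1010 -> 0b1010
#   0b1010 & 0b1001 -> 0b1000
#   0b1000 & 0b0111 -> 0b0000   => three iterations, so count == 3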
| 53
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
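# The denoising loop these tests exercise, reduced to its core shape
# (`model`, `sample` and `generator` are stand-ins; a sketch, not a definitive recipe):
# for t in scheduler.timesteps:
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample, generator=generator).prev_sample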
| 53
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__UpperCamelCase : Tuple = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__UpperCamelCase : Dict = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__UpperCamelCase : Optional[int] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1; larger values indicate that P and Q are closer.
    frontier_integral: Frontier Integral, a number between 0 and 1; smaller values indicate that P and Q are closer.
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve.
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text.
    q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : int=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[int]="auto" , _lowerCAmelCase : Tuple=-1 , _lowerCAmelCase : Optional[int]=0.9 , _lowerCAmelCase : int=5 , _lowerCAmelCase : Tuple=500 , _lowerCAmelCase : List[Any]="gpt2-large" , _lowerCAmelCase : List[str]=-1 , _lowerCAmelCase : List[Any]=1024 , _lowerCAmelCase : Any=25 , _lowerCAmelCase : List[Any]=5 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[int]=25 , ) -> Optional[int]:
"""simple docstring"""
__lowercase = compute_mauve(
p_text=_lowerCAmelCase , q_text=_lowerCAmelCase , p_features=_lowerCAmelCase , q_features=_lowerCAmelCase , p_tokens=_lowerCAmelCase , q_tokens=_lowerCAmelCase , num_buckets=_lowerCAmelCase , pca_max_data=_lowerCAmelCase , kmeans_explained_var=_lowerCAmelCase , kmeans_num_redo=_lowerCAmelCase , kmeans_max_iter=_lowerCAmelCase , featurize_model_name=_lowerCAmelCase , device_id=_lowerCAmelCase , max_text_length=_lowerCAmelCase , divergence_curve_discretization_size=_lowerCAmelCase , mauve_scaling_factor=_lowerCAmelCase , verbose=_lowerCAmelCase , seed=_lowerCAmelCase , )
return out
| 53
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
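# The cache-under-lock pattern used in __init__ above, in isolation
# (`cache_path` and `build_fn` are illustrative names):
def load_or_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):  # first process builds; the rest read the cache
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features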
| 53
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :torch.FloatTensor
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self : str , _lowerCAmelCase : int = 16 , _lowerCAmelCase : int = 88 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 1 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : int = 32 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : str = "geglu" , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = num_attention_heads
__lowercase = attention_head_dim
__lowercase = num_attention_heads * attention_head_dim
__lowercase = in_channels
__lowercase = torch.nn.GroupNorm(num_groups=_lowerCAmelCase , num_channels=_lowerCAmelCase , eps=1e-6 , affine=_lowerCAmelCase )
__lowercase = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
# 3. Define transformers blocks
__lowercase = nn.ModuleList(
[
BasicTransformerBlock(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dropout=_lowerCAmelCase , cross_attention_dim=_lowerCAmelCase , activation_fn=_lowerCAmelCase , attention_bias=_lowerCAmelCase , double_self_attention=_lowerCAmelCase , norm_elementwise_affine=_lowerCAmelCase , )
for d in range(_lowerCAmelCase )
] )
__lowercase = nn.Linear(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Any=None , _lowerCAmelCase : bool = True , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = hidden_states.shape
__lowercase = batch_frames // num_frames
__lowercase = hidden_states
__lowercase = hidden_states[None, :].reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
__lowercase = self.norm(_lowerCAmelCase )
__lowercase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = self.proj_in(_lowerCAmelCase )
# 2. Blocks
for block in self.transformer_blocks:
__lowercase = block(
_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , timestep=_lowerCAmelCase , cross_attention_kwargs=_lowerCAmelCase , class_labels=_lowerCAmelCase , )
# 3. Output
__lowercase = self.proj_out(_lowerCAmelCase )
__lowercase = (
hidden_states[None, None, :]
.reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
__lowercase = hidden_states.reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_lowerCAmelCase )
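# Shape bookkeeping performed in forward() above, on schematic dimensions:
#   input   (batch * frames, channels, height, width)
#   reshape (batch, frames, channels, height, width), permute to
#           (batch, channels, frames, height, width) for the group norm, then
#   flatten (batch * height * width, frames, inner_dim) for the attention blocks,
#   and invert the whole transformation before adding the residual.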
| 53
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
                'Very often splitting large files into smaller files can prevent the tokenizer from going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether or not to use whole word masking.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated into blocks of this size for training. '
                'Defaults to the model max input length for single-sentence inputs (taking special tokens into account).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
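        # `line_by_line` treats every line of the file as an independent example;
        # otherwise `TextDataset` concatenates the corpus and chunks it into fixed-size blocks.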
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
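    # XLNet is pretrained with permutation language modeling, so it needs its own collator;
    # other models use a (whole word) masked or causal LM collator.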
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 53
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Optional[int] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Union[str, Any] = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
__UpperCamelCase : Optional[Any] = """▁"""
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Any = VOCAB_FILES_NAMES
__snake_case :List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Optional[Any] = ['input_ids', 'attention_mask']
__snake_case :Tuple = BarthezTokenizer
def __init__( self : int , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Any="</s>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : Union[str, Any]="<unk>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : Any="<mask>" , **_lowerCAmelCase : Optional[Any] , ) -> int:
"""simple docstring"""
__lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase = vocab_file
        __lowercase = bool(self.vocab_file )
def _a ( self : str , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : Optional[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__UpperCamelCase : List[Any] = """RegNetConfig"""
# Base docstring
__UpperCamelCase : Dict = """facebook/regnet-y-040"""
__UpperCamelCase : Tuple = [1, 1088, 7, 7]
# Image classification docstring
__UpperCamelCase : Union[str, Any] = """facebook/regnet-y-040"""
__UpperCamelCase : Tuple = """tabby, tabby cat"""
__UpperCamelCase : Optional[int] = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : str , _lowerCAmelCase : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[str] = "relu" , **_lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__lowercase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__lowercase = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding="""VALID""" , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name="""convolution""" , )
__lowercase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
__lowercase = ACTaFN[activation] if activation is not None else tf.identity
def _a ( self : int , _lowerCAmelCase : Any ) -> str:
"""simple docstring"""
__lowercase = self.convolution(self.padding(_lowerCAmelCase ) )
__lowercase = self.normalization(_lowerCAmelCase )
__lowercase = self.activation(_lowerCAmelCase )
return hidden_state
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : int , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = config.num_channels
__lowercase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def _a ( self : Optional[int] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = shape_list(_lowerCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowercase = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) )
__lowercase = self.embedder(_lowerCAmelCase )
return hidden_state
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name="""convolution""" )
__lowercase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
def _a ( self : List[Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False ) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase )
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name="""pooler""" )
__lowercase = [
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def _a ( self : int , _lowerCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.pooler(_lowerCAmelCase )
for layer_module in self.attention:
__lowercase = layer_module(_lowerCAmelCase )
__lowercase = hidden_state * pooled
return hidden_state
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowercase = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name="""layer.2""" ),
]
__lowercase = ACTaFN[config.hidden_act]
def _a ( self : int , _lowerCAmelCase : Tuple ) -> int:
"""simple docstring"""
__lowercase = hidden_state
for layer_module in self.layers:
__lowercase = layer_module(_lowerCAmelCase )
__lowercase = self.shortcut(_lowerCAmelCase )
hidden_state += residual
__lowercase = self.activation(_lowerCAmelCase )
return hidden_state
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
__lowercase = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(_lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name="""layer.3""" ),
]
__lowercase = ACTaFN[config.hidden_act]
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = hidden_state
for layer_module in self.layers:
__lowercase = layer_module(_lowerCAmelCase )
__lowercase = self.shortcut(_lowerCAmelCase )
hidden_state += residual
__lowercase = self.activation(_lowerCAmelCase )
return hidden_state
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
__lowercase = [
# downsampling is done in the first layer with stride of 2
layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , name="""layers.0""" ),
*[layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def _a ( self : Optional[Any] , _lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
for layer_module in self.layers:
__lowercase = layer_module(_lowerCAmelCase )
return hidden_state
class __UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
__lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase , name=F'stages.{i+1}' ) )
def _a ( self : List[Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
__lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
__lowercase = stage_module(_lowerCAmelCase )
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase )
@keras_serializable
class __UpperCamelCase ( tf.keras.layers.Layer ):
__snake_case :Dict = RegNetConfig
def __init__( self : List[str] , _lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = config
__lowercase = TFRegNetEmbeddings(_lowerCAmelCase , name="""embedder""" )
__lowercase = TFRegNetEncoder(_lowerCAmelCase , name="""encoder""" )
__lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name="""pooler""" )
@unpack_inputs
def _a ( self : List[str] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.embedder(_lowerCAmelCase , training=_lowerCAmelCase )
__lowercase = self.encoder(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
__lowercase = encoder_outputs[0]
__lowercase = self.pooler(_lowerCAmelCase )
        # Change to NCHW output format to have uniformity in the modules
__lowercase = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
__lowercase = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowercase = tuple([tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = RegNetConfig
__snake_case :Optional[Any] = 'regnet'
__snake_case :Dict = 'pixel_values'
@property
def _a ( self : List[str] ) -> Any:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__UpperCamelCase : Tuple = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
__UpperCamelCase : Union[str, Any] = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , _lowerCAmelCase , )
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : Optional[Any] , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
__lowercase = TFRegNetMainLayer(_lowerCAmelCase , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self : Tuple , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : int=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.regnet(
pixel_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , _lowerCAmelCase , )
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
def __init__( self : int , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
__lowercase = config.num_labels
__lowercase = TFRegNetMainLayer(_lowerCAmelCase , name="""regnet""" )
# classification head
__lowercase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self : List[Any] , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Union[str, Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.regnet(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
__lowercase = outputs.pooler_output if return_dict else outputs[1]
__lowercase = self.classifier[0](_lowerCAmelCase )
__lowercase = self.classifier[1](_lowerCAmelCase )
__lowercase = None if labels is None else self.hf_compute_loss(labels=_lowerCAmelCase , logits=_lowerCAmelCase )
if not return_dict:
__lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowerCAmelCase , logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
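# A minimal usage sketch (an assumption: these classes are exported under their upstream
# `transformers` names, e.g. `TFRegNetForImageClassification`; `image` is a PIL image):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1)[0])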
| 53
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
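    # Track the best sum that includes the previous element and the best sum that excludes it
    # (the classic "house robber" recurrence for maximum sums over non-adjacent elements).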
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
| 1
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = """hf-internal-testing/tiny-random-t5"""
__lowercase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
__lowercase = tokenizer("""This is me""" , return_tensors="""pt""" )
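        # `to_bettertransformer()` swaps supported layers for their BetterTransformer fast-path
        # equivalents (via `optimum`); `reverse_bettertransformer()` restores the original modules.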
__lowercase = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__lowercase = model.generate(**_lowerCAmelCase )
__lowercase = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__lowercase = model_reloaded.generate(**_lowerCAmelCase )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = """hf-internal-testing/tiny-random-t5"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase )
__lowercase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_lowerCAmelCase ):
model.save_pretrained(_lowerCAmelCase )
__lowercase = model.reverse_bettertransformer()
model.save_pretrained(_lowerCAmelCase )
| 53
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53
| 1
|
from math import factorial
def snake_case ( lowerCamelCase = 100 ):
'''simple docstring'''
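    # Project Euler 20: digit sum of n!, e.g. 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.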
    return sum(int(x ) for x in str(factorial(lowerCamelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 53
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
__lowercase = ksize + 1
__lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
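    # Gabor function: g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi),
    # where (x', y') are the pixel coordinates rotated by theta.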
# each value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degree to radiant
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
__UpperCamelCase : List[str] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 53
| 1
|
class __UpperCamelCase ( _lowerCAmelCase ):
pass
class __UpperCamelCase ( _lowerCAmelCase ):
pass
class __UpperCamelCase :
def __init__( self : int ) -> int:
"""simple docstring"""
__lowercase = [
[],
[],
[],
]
def _a ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(_lowerCAmelCase )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self : str ) -> str:
"""simple docstring"""
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class __UpperCamelCase :
def __init__( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = []
def _a ( self : List[str] , _lowerCAmelCase : int ) -> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(_lowerCAmelCase )
def _a ( self : Any ) -> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
__lowercase = min(self.queue )
self.queue.remove(_lowerCAmelCase )
return data
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.queue )
def snake_case ( ):
'''simple docstring'''
__lowercase = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def snake_case ( ):
'''simple docstring'''
__lowercase = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 1
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@require_torch
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
__lowercase = load_dataset("""ashraq/esc50""" )
__lowercase = dataset["""train"""]["""audio"""][-1]["""array"""]
__lowercase = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def _a ( self : Any ) -> int:
"""simple docstring"""
pass
@slow
@require_torch
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
__lowercase = load_dataset("""ashraq/esc50""" )
__lowercase = dataset["""train"""]["""audio"""][-1]["""array"""]
__lowercase = audio_classifier(_lowerCAmelCase , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
__lowercase = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
__lowercase = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
| 53
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 53
| 1
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return "".join(chr(ord(lowerCamelCase ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 53
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
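    # A number is bouncy when its digits are neither monotonically increasing nor
    # monotonically decreasing, e.g. 155349 (Project Euler 112).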
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 53
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : int="resnet50" , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Union[str, Any]=True , ) -> Optional[int]:
"""simple docstring"""
__lowercase = parent
__lowercase = out_indices if out_indices is not None else [4]
__lowercase = stage_names
__lowercase = out_features
__lowercase = backbone
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = use_pretrained_backbone
__lowercase = is_training
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = self.get_config()
return config, pixel_values
def _a ( self : Any ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
__lowercase = TimmBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :str = (TimmBackbone,) if is_torch_available() else ()
__snake_case :Dict = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
__snake_case :Union[str, Any] = False
__snake_case :List[Any] = False
__snake_case :Tuple = False
__snake_case :Dict = False
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = TimmBackboneModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def _a ( self : str ) -> List[str]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = """resnet18"""
__lowercase = """microsoft/resnet-18"""
__lowercase = AutoBackbone.from_pretrained(_lowerCAmelCase , use_timm_backbone=_lowerCAmelCase )
__lowercase = AutoBackbone.from_pretrained(_lowerCAmelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase = AutoBackbone.from_pretrained(_lowerCAmelCase , use_timm_backbone=_lowerCAmelCase , out_indices=[1, 2, 3] )
__lowercase = AutoBackbone.from_pretrained(_lowerCAmelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _a ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def _a ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def _a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Tuple ) -> int:
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions(self):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config(self):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
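# Illustrative note (not part of the original file): with the lazy pattern above,
# `import transformers.models.swiftformer` stays cheap; a heavy symbol such as
# `SwiftFormerModel` is only resolved (and torch imported) on first attribute access.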
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""mobilebert-uncased""": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="""[UNK]""" , sep_token="""[SEP]""" , pad_token="""[PAD]""" , cls_token="""[CLS]""" , mask_token="""[MASK]""" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
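# Minimal usage sketch (illustrative; assumes the "google/mobilebert-uncased" files
# referenced above are reachable):
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   tok.build_inputs_with_special_tokens([7, 8], [9])     -> [CLS] 7 8 [SEP] 9 [SEP]
#   tok.create_token_type_ids_from_sequences([7, 8], [9]) -> [0, 0, 0, 0, 1, 1]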
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
    def test_stable_diffusion_flax(self):
        """simple docstring"""
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        """simple docstring"""
        model_id = """stabilityai/stable-diffusion-2"""
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision="""bf16""" , dtype=jnp.bfloat16 , )
        params["""scheduler"""] = scheduler_params
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute( self , predictions , references , sample_weight=None ):
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
        self.elements = []
        self.set = set()
    def minkey(self):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("""inf""" )
    def empty(self):
        """simple docstring"""
        return len(self.elements ) == 0
    def put(self , item , priority):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element(self , item):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show(self):
        """simple docstring"""
        return self.elements[0][1]
    def get(self):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
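# Minimal usage sketch of the queue above (illustrative only):
#   pq = PriorityQueue()
#   pq.put((0, 0), 5.0)   # insert a grid position with priority 5.0
#   pq.put((0, 0), 3.0)   # re-inserting the same item updates its priority
#   pq.minkey()           # -> 3.0
#   pq.get()              # -> (3.0, (0, 0)), removing the item from the queue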
def consistent_heuristic(p, goal):
    '''simple docstring'''
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_1(p, goal):
    '''simple docstring'''
    return consistent_heuristic(p , goal ) // t
def heuristic_2(p, goal):
    '''simple docstring'''
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key(start, i, goal, g_function):
    '''simple docstring'''
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
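# Illustrative check of the priority key above (assumes W1 == 1 and the euclidean
# heuristic at index 0): for a node at (0, 0) with g == 0 and goal (3, 4),
# key((0, 0), 0, (3, 4), {(0, 0): 0}) evaluates to 0 + 1 * 5.0 == 5.0.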
def do_something(back_pointer, goal, start):
    '''simple docstring'''
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = """*"""
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = """#"""
    grid[0][(n - 1)] = """-"""
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = """-"""
        x = back_pointer[x]
    grid[(n - 1)][0] = """-"""
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=""" """ )
                print("""<-- End position""" , end=""" """ )
            else:
                print(grid[i][j] , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
    print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    x = back_pointer[goal]
    while x != start:
        print(x , end=""" """ )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid(p):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ):
    '''simple docstring'''
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("""inf""" )
            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= W2 * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
    '''simple docstring'''
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start, goal, n_heuristic):
    '''simple docstring'''
    g_function = {start: 0, goal: float("""inf""" )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("""inf""" ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
    print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
def hex_to_bin(hex_num):
    '''simple docstring'''
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
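# Illustrative conversions with the function above (binary digits returned as int):
#   hex_to_bin("AC")   -> 10101100
#   hex_to_bin("-0f")  -> -1111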
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name):
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    return config
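# Illustrative use of the config builder above (assumes hub access for the label file):
#   config = get_maskformer_config("maskformer-swin-tiny-ade")
#   config.num_labels  -> 150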
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
# fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    '''simple docstring'''
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
# fmt: on
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , """rb""" ) as f:
        data = pickle.load(f )
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="""pt""" )
    outputs = model(**inputs )
    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(F'nielsr/{model_name}' )
        image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'
def is_spain_national_id(spanish_id):
    '''simple docstring'''
    if not isinstance(spanish_id , str ):
        msg = F'Expected string as input, found {type(spanish_id ).__name__}'
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace("""-""" , """""" ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
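# Illustrative check: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
#   is_spain_national_id("12345678Z") -> True
#   is_spain_national_id("12345678T") -> False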
from math import sqrt
def is_prime(number):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth = 10_001 ):
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
return number
if __name__ == "__main__":
print(F'''{solution() = }''')
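# Illustrative values for the solver above: solution(1) -> 2, solution(6) -> 13,
# and the default solution() returns the 10,001st prime (104743).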
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["input_features", "attention_mask"]
    def __init__( self , feature_size=80 , sampling_rate=1_6000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="""hamming_window""" , frame_signal_scale=3_2768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform ) -> np.ndarray:
        """simple docstring"""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="""log""" , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        """simple docstring"""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features , attention_mask = None ) -> List[np.ndarray]:
        """simple docstring"""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    F' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] , list ):
            padded_inputs["""input_features"""] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
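# Minimal usage sketch (illustrative; assumes 1 second of 16 kHz mono audio):
#   extractor = MCTCTFeatureExtractor()  # defaults defined above
#   batch = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000)
#   batch["input_features"][0].shape  -> (num_frames, 80)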
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        pass
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pass
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        pass
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        """simple docstring"""
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).' )
    def test_vision_text_dual_encoder_model(self):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs )
    def test_model_from_pretrained_configs(self):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs )
    def test_vision_text_dual_encoder_from_pretrained(self):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs )
    def test_save_load(self):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs )
    def test_vision_text_output_attention(self):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs )
@slow
    def test_real_model_save_load_from_pretrained(self):
        """simple docstring"""
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_tf
class TFViTBertModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = TFViTModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
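        # e.g. the standard DeiT setup of a 224x224 image with 16x16 patches gives
        # (224 // 16) ** 2 = 196 patches and a sequence length of 196 + 2 = 198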
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 0
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : int ) -> Any:
"""simple docstring"""
__lowercase = torch.nn.Linear(10 , 10 )
__lowercase = torch.optim.SGD(model.parameters() , 0.1 )
__lowercase = Accelerator()
__lowercase = accelerator.prepare(UpperCamelCase_ )
try:
pickle.loads(pickle.dumps(UpperCamelCase_ ) )
except Exception as e:
self.fail(F'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 704
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        """list_of_points: control points in the xy plane used to build the curve."""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(self.list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Returns the values of the Bernstein basis polynomials at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Returns the point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plots the curve along with its control points for t in [0, 1]."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
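    # A quick numeric sanity check (an added sketch, not part of the original demo):
    # for the degree-2 curve above, the Bernstein basis at t = 0.5 is
    # [0.25, 0.5, 0.25] (summing to 1), so the curve point is (3.75, 2.5).
    quadratic = BezierCurve([(0, 0), (5, 5), (5, 0)])
    assert quadratic.basis_function(0.5) == [0.25, 0.5, 0.25]
    assert quadratic.bezier_curve_function(0.5) == (3.75, 2.5)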
| 53
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __UpperCamelCase ( lowercase__ ):
__snake_case :str = 'swin2sr'
__snake_case :Dict = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : Any=1 , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : Any=180 , _lowerCAmelCase : Optional[int]=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase : Optional[Any]=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase : str=8 , _lowerCAmelCase : List[str]=2.0 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : Any=False , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : Optional[int]=1e-5 , _lowerCAmelCase : str=2 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : int="1conv" , _lowerCAmelCase : str="pixelshuffle" , **_lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(**__lowerCamelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = len(__lowerCamelCase )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = upscale
__lowercase = img_range
__lowercase = resi_connection
__lowercase = upsampler
| 705
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
| 0
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__UpperCamelCase : Dict = Lock()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowerCamelCase_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__lowercase = min(lowerCamelCase_ , lowerCamelCase_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowerCamelCase_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__lowercase = max(lowerCamelCase_ , lowerCamelCase_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowerCamelCase_ )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
__lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__lowercase = Pipe()
__lowercase = Pipe()
process_array_.append(
Process(
target=lowerCamelCase_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
__lowercase = temp_rs
__lowercase = temp_rr
for i in range(1 , len(lowerCamelCase_ ) - 1 ):
__lowercase = Pipe()
__lowercase = Pipe()
process_array_.append(
Process(
target=lowerCamelCase_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
__lowercase = temp_rs
__lowercase = temp_rr
process_array_.append(
Process(
target=lowerCamelCase_ , args=(
len(lowerCamelCase_ ) - 1,
arr[len(lowerCamelCase_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowerCamelCase_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowerCamelCase_ ) ):
__lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
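# For reference, a minimal single-process sketch of the same odd-even transposition
# idea (an illustrative addition, not part of the original module): pass i compares
# adjacent pairs starting at index i % 2, mirroring the (i + position) % 2 rule used
# by the worker processes above.
def odd_even_transposition_single_process(arr: list) -> list:
    for i in range(len(arr)):
        for j in range(i % 2, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr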
def snake_case ( ):
'''simple docstring'''
__lowercase = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*lowerCamelCase_ )
__lowercase = odd_even_transposition(lowerCamelCase_ )
print("""Sorted List\n""" )
print(*lowerCamelCase_ )
if __name__ == "__main__":
main()
| 706
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
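# A small worked example of the rewrite above (illustrative tokens only):
#   {"hell@@": 5, "o": 2, "</s>": 1}  ->  {"hell": 5, "o</w>": 2, "</s>": 1}
# BPE continuation pieces lose their trailing "@@", word-final pieces gain "</w>",
# and the four special tokens are restored without the "</w>" suffix.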
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 53
| 0
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : Dict = "▁"
__UpperCamelCase : str = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __UpperCamelCase ( UpperCAmelCase_ , unittest.TestCase ):
__snake_case :Optional[Any] = BertGenerationTokenizer
__snake_case :Dict = False
__snake_case :List[Any] = True
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__lowercase = BertGenerationTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase = '<s>'
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(_lowercase ) , 1002 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = BertGenerationTokenizer(_lowercase , keep_accents=_lowercase )
__lowercase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [285, 46, 10, 170, 382] , )
__lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowercase = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowercase = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = 'Hello World!'
__lowercase = [1_8536, 2260, 101]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
__lowercase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@require_torch
@slow
def _a ( self : Any ) -> Any:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__lowercase = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowercase = ' '.join(_lowercase )
__lowercase = self.big_tokenizer.encode_plus(_lowercase , return_tensors="""pt""" , return_token_type_ids=_lowercase )
__lowercase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_lowercase )
__lowercase = BertGenerationConfig()
__lowercase = BertGenerationEncoder(_lowercase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase )
model(**_lowercase )
@slow
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = {'input_ids': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 707
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
        image_slice_1 = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
        image_slice_1 = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
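# Hedged example (using the name at the call site below): check_same_shape applied to
# [torch.zeros(2, 3), torch.ones(2, 3)] -> True; mixing a (2, 3) and a (2, 4) tensor -> False.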
class __UpperCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__snake_case :str = StableDiffusionLatentUpscalePipeline
__snake_case :Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__snake_case :int = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__snake_case :Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__snake_case :Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__snake_case :str = frozenset([] )
__snake_case :Optional[Any] = True
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = 1
__lowercase = 4
__lowercase = (16, 16)
__lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
def _a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNet2DConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=_lowerCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=_lowerCAmelCase , only_cross_attention=_lowerCAmelCase , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
__lowercase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
__lowercase = EulerDiscreteScheduler(prediction_type="""sample""" )
__lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""quick_gelu""" , projection_dim=512 , )
__lowercase = CLIPTextModel(_lowerCAmelCase )
__lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowercase = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=0 ) -> List[str]:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
__lowercase = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
__lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1e-3 )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def _a ( self : str ) -> int:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _a ( self : str ) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def _a ( self : Dict ) -> int:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3 )
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCAmelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=True )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs(_lowerCAmelCase )
__lowercase = 2
__lowercase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# skip schedulers that this pipeline does not support
continue
__lowercase = getattr(_lowerCAmelCase , scheduler_enum.name )
__lowercase = scheduler_cls.from_config(pipe.scheduler.config )
__lowercase = pipe(**_lowerCAmelCase )[0]
outputs.append(_lowerCAmelCase )
assert check_same_shape(_lowerCAmelCase )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = torch.manual_seed(33 )
__lowercase = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.float16 )
pipe.to("""cuda""" )
__lowercase = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
__lowercase = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
__lowercase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , output_type="""latent""" ).images
__lowercase = upscaler(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCAmelCase , output_type="""np""" , ).images[0]
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def _a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = torch.manual_seed(33 )
__lowercase = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
__lowercase = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
__lowercase = upscaler(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_lowerCAmelCase , output_type="""np""" , ).images[0]
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-2
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
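# Hedged example: remove_duplicates("Goodbye!!") keeps the first occurrence of each
# letter, keeps spaces, and drops other non-alphabetic characters -> "GODBYE".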
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__UpperCamelCase : List[Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
__UpperCamelCase : int = 'hopper-medium-v2'
__UpperCamelCase : List[Any] = gym.make(env_name)
__UpperCamelCase : str = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
__UpperCamelCase : List[str] = env.reset()
__UpperCamelCase : Any = 0
__UpperCamelCase : Tuple = 0
__UpperCamelCase : Optional[Any] = 1000
__UpperCamelCase : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__UpperCamelCase : List[Any] = pipeline(obs, planning_horizon=32)
# execute action in environment
__UpperCamelCase : Optional[int] = env.step(denorm_actions)
__UpperCamelCase : Tuple = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__UpperCamelCase : List[str] = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
import os
import platform
import sys
__UpperCamelCase : List[str] = "3"
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( SchedulerCommonTest ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __UpperCamelCase :
def __init__( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any=13 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Tuple=99 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : str=4 , _lowerCAmelCase : Optional[Any]=37 , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=512 , _lowerCAmelCase : str=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : str=None , _lowerCAmelCase : str=0 , ) -> List[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = projection_dim
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
__lowercase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFDPRContextEncoder(config=__A )
__lowercase = model(__A , attention_mask=__A , token_type_ids=__A )
__lowercase = model(__A , token_type_ids=__A )
__lowercase = model(__A )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFDPRQuestionEncoder(config=__A )
__lowercase = model(__A , attention_mask=__A , token_type_ids=__A )
__lowercase = model(__A , token_type_ids=__A )
__lowercase = model(__A )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a ( self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = TFDPRReader(config=__A )
__lowercase = model(__A , attention_mask=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
__lowercase
) = config_and_inputs
__lowercase = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__snake_case :int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__snake_case :List[str] = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
__snake_case :str = False
__snake_case :int = False
__snake_case :Dict = False
__snake_case :Optional[Any] = False
__snake_case :str = False
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = TFDPRModelTester(self )
__lowercase = ConfigTester(self , config_class=__A , hidden_size=37 )
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__A )
def _a ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__A )
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__A )
@slow
def _a ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFDPRContextEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFDPRContextEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFDPRQuestionEncoder.from_pretrained(__A )
self.assertIsNotNone(__A )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFDPRReader.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : Any ) -> int:
"""simple docstring"""
__lowercase = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
__lowercase = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
__lowercase = model(__A )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__lowercase = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( Enum ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( Dataset ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = F'Input value of [number={number}] must be an integer'
raise TypeError(lowerCamelCase )
if number < 0:
return False
__lowercase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
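# Hedged sketch of the check above: the number squared must end with the digits
# of the number itself (an automorphic number).
#     snake_case(76)   # 76**2 = 5776 ends in 76 -> True
#     snake_case(7)    # 7**2 = 49 ends in 9, not 7 -> False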
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
__snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Dict = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
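# Hedged example: after sorting, the largest side must be strictly smaller than
# the sum of all the others for the sides to close into a polygon.
#     snake_case([3, 4, 5])    # 5 < 3 + 4   -> True (a valid triangle)
#     snake_case([1, 1, 10])   # 10 >= 1 + 1 -> False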
if __name__ == "__main__":
import doctest
doctest.testmod()
import datasets
__UpperCamelCase : Dict = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
__UpperCamelCase : Dict = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
__UpperCamelCase : Any = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def _a ( self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
return {"accuracy": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
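# Worked example (hedged): for [2, 7, 9, 3, 1] the best non-adjacent selection
# is 2 + 9 + 1 = 12, so snake_case([2, 7, 9, 3, 1]) should return 12.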
if __name__ == "__main__":
import doctest
doctest.testmod()
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=DummyObject ):
__snake_case :Any = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : Dict , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : str , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCamelCase ( metaclass=DummyObject ):
__snake_case :Dict = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : str , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : Union[str, Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCamelCase ( metaclass=DummyObject ):
__snake_case :Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : Union[str, Any] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCamelCase ( metaclass=DummyObject ):
__snake_case :List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : int , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCamelCase ( metaclass=DummyObject ):
__snake_case :Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : str , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[str] ) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : str , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __UpperCamelCase ( metaclass=DummyObject ):
__snake_case :Any = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def _a ( cls : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
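# Example invocation (hypothetical output path; the checkpoint repo is the one
# cited in the --checkpoint-repo help text):
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_converted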
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed target of the obfuscated assignment: quiet TensorFlow's C++ logging
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a ksize x ksize Gabor kernel for the given orientation/frequency parameters."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
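    # A minimal sanity check (illustrative values, not part of the original script):
    # a 9x9 kernel should come back odd-sized and peak at its centre, where the
    # Gaussian envelope and the cosine carrier are both maximal.
    #   k = gabor_filter_kernel(9, 8, 45, 10, 0, 0)
    #   assert k.shape == (9, 9) and k[4, 4] == k.max()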
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever one of force, area, or distance is passed in as 0."""
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
__lowercase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__lowercase = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__lowercase = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
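    # Example (hypothetical values): with force=0 the function solves for the
    # Casimir force between two 2 cm x 2 cm plates held 1 micrometre apart.
    print(casimir_force(force=0, area=4e-4, distance=1e-6))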
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
args = parser.parse_args()
from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
selected_warnings = extract_warnings(args.output_dir, args.targets)
selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
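# Note on the matching above: pytest renders each collected warning roughly as
# "path:line: Category: message", so a target such as "DeprecationWarning" is
# matched via the substring ": DeprecationWarning: ". For example, the line
#   src/foo.py:12: DeprecationWarning: bar is deprecated
# is kept when "DeprecationWarning" is in `targets`.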
import math
def jump_search(arr, x):
    """Search sorted `arr` for `x` in sqrt(n)-sized jumps; return the index or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
user_input = input("""Enter numbers separated by a comma:\n""").strip()
arr = [int(item) for item in user_input.split(""",""")]
x = int(input("""Enter the number to be searched:\n"""))
res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F'''Number {x} is at index {res}''')
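# Worked example (assumed input): for arr = [0, 1, 3, 5, 8, 13] and x = 8, the
# block size is floor(sqrt(6)) = 2, the jumps stop once arr[5] = 13 >= 8, and the
# linear scan from index 4 finds 8 there, so jump_search returns 4.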
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
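# With the lazy structure above, importing the package stays cheap: the
# torch-backed module is only loaded on first attribute access, e.g.
#   from transformers import MraModel   # first access triggers the real import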
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
# NOTE: the original class name was obfuscated; the attribute/method layout below
# matches transformers' GitProcessor, so that name is assumed here.
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
def check_bouncy(n: int) -> bool:
    """Return True if n is bouncy (its digits are neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
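# Quick examples: 6789 is increasing and 9876 is decreasing, so neither is
# bouncy, while 155349 is neither sorted nor reverse-sorted, so
# check_bouncy(155349) is True. Per Project Euler 112, solution(50) == 538,
# the first point where half of all numbers are bouncy.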
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main():
    """Print octal equivalents of a few sample decimal values."""
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
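# The conversion accumulates the octal digits in base 10: for 65 the remainders
# are 65 % 8 = 1, then 8 % 8 = 0, then 1, giving 1*1 + 0*10 + 1*100 = 101,
# which is formatted as "0o101".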
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load datasets. Called after prepare data."""
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
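# Example invocation (hypothetical paths/task; flags beyond --task come from
# add_generic_args and BaseTransformer in lightning_base):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results/mrpc --do_train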
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """A priority queue that also supports updating and removing arbitrary items."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item that is already queued
            (pri, x) = heapq.heappop(self.elements)
            temp = []
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the grid, the obstacles, and the path taken, then exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    """Return True if p lies inside the n x n grid."""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Relax the four neighbours of s and push improved ones onto the open lists."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    """Build the list of blocked cells shared by the demo grids."""
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """Run Multi-Heuristic A* with one anchor queue and n_heuristic - 1 inadmissible queues."""
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
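# Note: with key(s, i) = g(s) + W1 * h_i(s, goal), and inadmissible queues only
# consulted while their minkey stays within W2 times the anchor's minkey,
# Multi-Heuristic A* bounds the returned path cost by W1 * W2 times optimal
# (Aine et al., "Multi-Heuristic A*"). Here W1 = W2 = 1, so the search behaves
# like plain A* with extra exploratory queues.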
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return the list of all primes up to and including num, via the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
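# e.g. prime_sieve(10) -> [2, 3, 5, 7]; the main loop only walks up to
# floor(sqrt(10)) = 3, and the final scan collects the unmarked values above it.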
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig whose label set matches the dataset encoded in the model name."""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """List (source, destination) pairs mapping original checkpoint keys to HF names."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin qkv projection into separate query/key/value entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split the transformer decoder's fused in_proj matrices into q/k/v projections."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak the original MaskFormer weights into the HF structure and verify outputs."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    """--model_name""",
    default="""maskformer-swin-tiny-ade""",
    type=str,
    help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
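# Example invocation (hypothetical local paths):
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade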
from collections import deque
def tarjan(g):
    """Return the strongly connected components of directed graph g (adjacency lists)."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build adjacency lists for n vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
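    # Small worked example: the 3-cycle 0 -> 1 -> 2 -> 0 collapses into one
    # strongly connected component.
    #   tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]]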
from math import sqrt
def is_prime(number: int) -> bool:
    """Check to see if a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
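    # Quick sanity checks (an illustrative sketch, not part of the original script):
    # the first six primes are 2, 3, 5, 7, 11, 13.
    assert solution(1) == 2
    assert solution(6) == 13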
| 53
| 0
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
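
# A minimal usage sketch (added for illustration, not part of the original module):
# inspecting the dynamic ONNX axes that BartOnnxConfig declares for the default task.
#
#     config = BartConfig.from_pretrained("facebook/bart-large")
#     onnx_config = BartOnnxConfig(config)
#     print(onnx_config.inputs)   # OrderedDict: input name -> {axis index: axis name}
#     print(onnx_config.outputs)  # OrderedDict: output name -> {axis index: axis name}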
| 703
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    """Return ``x`` unchanged if it is iterable, else the pair ``(x, x)``."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
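
# For reference (an illustrative sketch, not part of the original tests): the
# dual-encoder similarity checked above is just a scaled dot product between the
# two embedding matrices, roughly logits_per_image = scale * image_embeds @ text_embeds.T.
def _toy_dual_encoder_logits(image_embeds: np.ndarray, text_embeds: np.ndarray, logit_scale: float = 1.0) -> np.ndarray:
    # Normalise each embedding to unit length, then take pairwise dot products.
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    return logit_scale * image_embeds @ text_embeds.T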
| 53
| 0
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant; valid values are 0.04 and 0.06
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with corners marked and the list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
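
# The per-window response computed above is the standard Harris measure
# R = det(M) - k * trace(M)^2 for the 2x2 structure tensor M. A tiny numeric
# sketch (illustrative only, with made-up gradient sums):
def _harris_response(wxx: float, wyy: float, wxy: float, k: float = 0.04) -> float:
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2


# e.g. _harris_response(2.0, 2.0, 0.0) == 4.0 - 0.04 * 16 == 3.36, a corner-like response.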
| 704
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """
    A Bezier curve is a weighted sum of a set of control points.
    This implementation works only for 2d coordinates in the xy plane.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """The Bernstein basis determines the weight of each control point at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Produce the (x, y) value of the Bezier curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the Bezier curve, evaluating it every ``step_size`` in t."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
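    # Quick numeric check (an illustrative sketch added here, not in the original
    # module): a degree-1 Bezier curve is a straight segment, so t = 0.5 returns
    # the midpoint of its two control points.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)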
| 53
| 0
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to fit the alphabet into a 5x5 table
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
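

if __name__ == "__main__":
    # Round-trip sketch (added for illustration; not part of the original module).
    # Repeated letters get an X inserted and odd-length input is padded with X,
    # so decoding returns the *prepared* plaintext rather than the raw input.
    key = "playfair example"
    assert decode(encode("Hide the gold", key), key) == "HIDETHEGOLDX"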
| 705
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(self, parent, batch_size: int = 13, image_size: int = 64, patch_size: int = 2, embed_dim: int = 3, num_channels: int = 3, is_training: bool = True, use_labels: bool = True, hidden_size: int = 128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers: int = 7, num_attention_heads: int = 4, intermediate_size: int = 37, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, type_sequence_label_size: int = 10, initializer_range: float = 0.02, encoder_stride: int = 2, num_attention_outputs: int = 1, dim: int = 128, depths: List[int] = [2, 2, 2, 2], resolution: int = 2, mlp_expansion_ratio: int = 2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
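
# Usage sketch (illustrative; it mirrors the slow integration test above rather
# than adding new behaviour):
#
#     image_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#     model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#     logits = model(**image_processor(images=prepare_img(), return_tensors="tf")).logits
#     predicted_class = int(tf.math.argmax(logits, axis=-1)[0])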
| 53
| 0
|
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 706
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is no longer broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
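# A hypothetical invocation (paths are placeholders for illustration):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt/dump \
#       --pytorch_dump_folder_path ./biogpt-converted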
| 53
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    # Note: this variant applies len(data) random transpositions; the textbook
    # Fisher-Yates (Knuth) shuffle is shown below.
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = [0, 1, 2, 3, 4, 5, 6, 7]
__UpperCamelCase : int = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 707
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
| 0
|
from __future__ import annotations


def all_unique(lst: list) -> bool:
    """
    Return True when every element of the list occurs exactly once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 1])
    False
    """
    return len(set(lst)) == len(lst)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 708
|
from __future__ import annotations


def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
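

# Example usage (a minimal sketch; the enciphered text depends on the keyword):
#     cipher_map = create_cipher_map("Goodbye!!")
#     ciphertext = encipher("Hello World!!", cipher_map)
#     decipher(ciphertext, cipher_map)  # -> 'HELLO WORLD!!'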
| 53
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
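

# A quick sketch of what color_quantize does: every RGB pixel is mapped to the index
# of its nearest cluster centroid (ImageGPT ships 512 such color clusters):
#     pixels = np.array([[[0, 0, 0], [250, 250, 250]]], dtype=np.float32)
#     clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
#     color_quantize(pixels, clusters)  # -> array([0, 1])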
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        # clusters is first for backwards compatibility with the old feature extractor
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # rescale to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
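

# Example usage (a sketch; assumes a released ImageGPT checkpoint id):
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     encoding = processor(images=pil_image, return_tensors="pt")
#     encoding["input_ids"]  # (batch_size, height * width) cluster indices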
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 53
| 0
|
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """
    >>> round(gaussian(1), 6)
    0.241971
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
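

# Quick check: the density peaks at x = mu with value 1 / sqrt(2 * pi * sigma**2),
# so for the standard normal, gaussian(0) ~= 0.3989422804014327.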
| 710
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 53
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
pass
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _a ( self : str ) -> str:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 53
| 0
|
import datasets
from .evaluate import evaluate
__UpperCamelCase : List[Any] = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
__UpperCamelCase : str = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
__UpperCamelCase : Union[str, Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 712
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
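

# Example (a sketch with a hypothetical setup): building an eval dataset once the
# dataclasses above have been parsed and a tokenizer loaded:
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True)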
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 53
| 0
|
def bead_sort(sequence: list) -> list:
    """
    Gravity-based "bead sort"; only defined for lists of non-negative integers.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
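
# Note: each of the len(sequence) passes only lets a bead fall one rod, so this
# list-based variant runs in O(n**2) time, e.g. bead_sort([5, 0, 2]) -> [0, 2, 5].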
| 713
|
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
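
# For a triangle this reduces to the strict triangle inequality, e.g.:
#     check_polygon([3, 4, 5])  # True: 5 < 3 + 4
#     check_polygon([1, 2, 3])  # False: 3 is not < 1 + 2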
| 53
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """apply_ocr""" ) )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , _lowerCAmelCase )
self.assertIsInstance(encoding.boxes , _lowerCAmelCase )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_pytorch(self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_LayoutLMv3_integration_test(self ) -> None:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__lowercase = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        __lowercase = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__lowercase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowerCAmelCase )
self.assertListEqual(encoding.boxes , _lowerCAmelCase )
        # with apply_ocr = False
        __lowercase = LayoutLMvaImageProcessor(apply_ocr=False )
        __lowercase = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
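        # With apply_ocr disabled the processor returns pixel_values only; the
        # `words` and `boxes` fields are attached to the encoding solely when
        # Tesseract OCR runs (apply_ocr=True, as in the assertions above).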
| 714
|
from __future__ import annotations
def maximum_non_adjacent_sum( nums ):
    '''Return the maximum sum over elements of `nums` such that no two chosen elements are adjacent.'''
    if not nums:
        return 0
    max_including = nums[0]  # best sum for a selection that includes the current element
    max_excluding = 0  # best sum for a selection that excludes the current element
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
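    # Illustrative checks: for [1, 2, 3] the best non-adjacent pick is 1 + 3 = 4;
    # for [1, 5, 3, 7, 2, 2, 6] it is 5 + 7 + 6 = 18.
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4
    assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18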
| 53
| 0
|
from ....utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase:
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ) -> None:
        """Wrap an existing config, copying its attributes and adding the multimodal fields."""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
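# A minimal usage sketch (the wrapped config object below is hypothetical, not
# part of this file): the wrapper copies every attribute of an existing config
# and then layers the multimodal fields on top.
#
#   text_config = SomeTextConfig(hidden_size=768)   # hypothetical config object
#   mm_config = __UpperCamelCase(text_config, num_labels=2, modal_hidden_size=2048)
#   assert mm_config.hidden_size == 768 and mm_config.modal_hidden_size == 2048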
| 715
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo , pytorch_dump_folder_path ):
    '''Copy/paste/tweak the checkpoint's weights into our RoBERTa-PreLayerNorm structure.'''
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="""pytorch_model.bin""" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("""roberta.""" ):
            tensor_key = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
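# Example invocation (the script name and output path are assumed; the flags
# mirror the argparse definitions below):
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-dump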
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
        help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class __UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    order = 2
@register_to_config
    def __init__( self , sigma_min : float = 0.02 , sigma_max : float = 100 , s_noise : float = 1.007 , s_churn : float = 80 , s_min : float = 0.05 , s_max : float = 50 , ) -> None:
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input( self , sample : torch.FloatTensor , timestep = None ) -> torch.FloatTensor:
        """No scaling is required for this scheduler; the sample is returned unchanged."""
        return sample

    def set_timesteps( self , num_inference_steps : int , device = None ) -> None:
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample : torch.FloatTensor , sigma : float , generator = None ):
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self , model_output : torch.FloatTensor , sigma_hat : float , sigma_prev : float , sample_hat : torch.FloatTensor , return_dict : bool = True , ):
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output : torch.FloatTensor , sigma_hat : float , sigma_prev : float , sample_hat : torch.FloatTensor , sample_prev : torch.FloatTensor , derivative : torch.FloatTensor , return_dict : bool = True , ):
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ):
        """simple docstring"""
        raise NotImplementedError()
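# A minimal sketch of the Karras-style sampling loop this scheduler supports
# (illustrative only; `unet` is a hypothetical denoiser, not part of this file):
#
#   scheduler.set_timesteps(num_inference_steps=50)
#   sample = randn_tensor(shape) * scheduler.init_noise_sigma
#   for i, sigma in enumerate(scheduler.schedule):
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat, sigma_hat)
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0.0
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample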
| 716
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ):
    '''Build a (ksize x ksize) real Gabor kernel; an even ksize is bumped to odd.'''
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
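# The kernel above evaluates the real-valued Gabor function
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel offsets from the kernel center rotated by theta degrees.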
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 53
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( BeitImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
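# Instantiating the deprecated class still works but emits a FutureWarning, e.g.:
#   extractor = __UpperCamelCase()  # FutureWarning: use BeitImageProcessor instead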
| 717
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    '''Extract warnings from a downloaded artifact (in .zip format).'''
    selected_warnings = set()
    buffer = []

    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    '''Extract warnings from all artifact files.'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        '''simple docstring'''
        return values.split(""",""" )

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 0
|
import requests
giphy_api_key = """YOUR API KEY"""


def get_gifs( query , api_key = giphy_api_key ):
    '''Get a list of URLs of GIFs based on a given query.'''
    formatted_query = "+".join(query.split() )
    url = F'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
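# The Giphy search endpoint responds with JSON of the form
# {"data": [{"url": ...}, ...]}; the helper simply collects each hit's "url" field.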
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mra"""] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 53
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
SPIECE_UNDERLINE = """▁"""


class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ) -> None:
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 719
|
def check_bouncy( n ):
    '''Return True if `n` is bouncy, i.e. its digits are neither sorted ascending nor descending.'''
    if not isinstance(n , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(n )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
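# Worked examples from the problem statement: 134468 is an increasing number,
# 66420 is a decreasing number, and 155349 is bouncy because it is neither.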
def solution( percent = 99 ):
    '''Project Euler 112: find the least number at which the proportion of bouncy numbers reaches `percent`.'''
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 53
| 0
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims( tree ):
    '''Collect the shapes of all tensors in a (possibly nested) dict/list/tuple tree.'''
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError("""Not supported""" )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx( flat_idx , dims ):
    '''Unravel a flat index into a multi-dimensional index for the given dims.'''
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
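# Worked example: with dims == (2, 3) the flat index 4 unravels to (1, 1),
# since 4 % 3 == 1 gives the last coordinate and 4 // 3 == 1 gives the first.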
@torch.jit.ignore
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
'''simple docstring'''
def reduce_edge_list(lowerCamelCase ) -> None:
__lowercase = True
for i in range(len(UpperCamelCase__ ) ):
__lowercase = -1 * (i + 1)
l[reversed_idx] &= tally
__lowercase = l[reversed_idx]
if start_edges is None:
__lowercase = [s == 0 for s in start]
reduce_edge_list(UpperCamelCase__ )
if end_edges is None:
__lowercase = [e == (d - 1) for e, d in zip(UpperCamelCase__ , UpperCamelCase__ )]
reduce_edge_list(UpperCamelCase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCamelCase__ ) == 0:
return [()]
elif len(UpperCamelCase__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__lowercase = []
__lowercase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCamelCase__ , UpperCamelCase__ ):
if s == e:
path_list.append(slice(UpperCamelCase__ , s + 1 ) )
else:
break
__lowercase = tuple(UpperCamelCase__ )
__lowercase = len(UpperCamelCase__ )
# start == end, and we're done
if divergence_idx == len(UpperCamelCase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowercase = start[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__lowercase = end[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__lowercase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = t.shape[:no_batch_dims]
__lowercase = list(_flat_idx_to_idx(UpperCamelCase__ , UpperCamelCase__ ) )
# _get_minimal_slice_set is inclusive
__lowercase = list(_flat_idx_to_idx(flat_end - 1 , UpperCamelCase__ ) )
# Get an ordered list of slices to perform
__lowercase = _get_minimal_slice_set(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
__lowercase = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
'''simple docstring'''
if not (len(UpperCamelCase__ ) > 0):
raise ValueError("""Must provide at least one input""" )
__lowercase = [shape[:no_batch_dims] for shape in _fetch_dims(UpperCamelCase__ )]
__lowercase = tuple([max(UpperCamelCase__ ) for s in zip(*UpperCamelCase__ )] )
def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__lowercase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__lowercase = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__lowercase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__lowercase = tensor_tree_map(_prep_inputs , UpperCamelCase__ )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__lowercase = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__lowercase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__lowercase = 0
__lowercase = prepped_outputs
for _ in range(UpperCamelCase__ ):
# Chunk the input
if not low_mem:
__lowercase = _select_chunk
else:
__lowercase = partial(
_chunk_slice , flat_start=UpperCamelCase__ , flat_end=min(UpperCamelCase__ , i + chunk_size ) , no_batch_dims=len(UpperCamelCase__ ) , )
__lowercase = tensor_tree_map(UpperCamelCase__ , UpperCamelCase__ )
# Run the layer on the chunk
__lowercase = layer(**UpperCamelCase__ )
# Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
# Put the chunk in its pre-allocated space
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
def assign(lowerCamelCase , lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
assign(UpperCamelCase__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__lowercase = da[k]
assign(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for xa, xa in zip(UpperCamelCase__ , UpperCamelCase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__lowercase = xa
elif isinstance(UpperCamelCase__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__lowercase = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
return out
class __UpperCamelCase :
    def __init__( self , max_chunk_size : int = 512 , ) -> None:
        """simple docstring"""
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size( self , fn : Callable , args : tuple , min_chunk_size : int ) -> int:
        """simple docstring"""
        logging.info("""Tuning chunk size...""" )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size : int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self , ac1 : Iterable , ac2 : Iterable ) -> bool:
        """simple docstring"""
        consistent = True
        for a1, a2 in zip(ac1 , ac2 ):
            assert type(a1 ) == type(a2 )
            if isinstance(a1 , (list, tuple) ):
                consistent &= self._compare_arg_caches(a1 , a2 )
            elif isinstance(a1 , dict ):
                a1_items = [v for _, v in sorted(a1.items() , key=lambda x : x[0] )]
                a2_items = [v for _, v in sorted(a2.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(a1_items , a2_items )
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size( self , representative_fn : Callable , args : tuple , min_chunk_size : int , ) -> int:
        """simple docstring"""
        consistent = True
        arg_data = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
| 0
|
'''simple docstring'''
import functools
def min_distance_up_bottom( word1 , word2 ):
    '''Compute the Levenshtein distance between two words with memoized recursion.'''
    len_word1 = len(word1 )
    len_word2 = len(word2 )

    @functools.cache
    def min_distance(index1 , index2 ) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )

    return min_distance(0 , 0 )
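# Worked example: min_distance_up_bottom("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, insert g).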
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown(self ) -> None:
        """simple docstring"""
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self ) -> None:
        """simple docstring"""
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self ) -> None:
        """simple docstring"""
        model_id = """stabilityai/stable-diffusion-2"""
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision="""bf16""" , dtype=jnp.bfloat16 , )
        params["""scheduler"""] = scheduler_params
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 53
| 0
|
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines ):
    '''simple docstring'''
    filtered_lines = []
    for line in lines:
        line = re.sub(r"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
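# e.g. hashing ["x = 1  # set x", "", "print(x)"] digests only the remaining
# code once comments and empty lines have been stripped out.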
# get importable module names and hash for caching
__UpperCamelCase : Optional[int] = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 700
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
    def __init__( self ):
        """simple docstring"""
        self.elements = []
        self.set = set()

    def minkey( self ):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("""inf""" )

    def empty( self ):
        """simple docstring"""
        return len(self.elements ) == 0

    def put( self , item , priority ):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for (pro, xxx) in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def remove_element( self , item ):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for (prito, yyy) in temp:
                heapq.heappush(self.elements , (prito, yyy) )

    def top_show( self ):
        """simple docstring"""
        return self.elements[0][1]

    def get( self ):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
def consistent_heuristic( P , goal ):
    '''Euclidean distance between P and the goal.'''
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )


def heuristic_1( P , goal ):
    '''A scaled (inconsistent) variant of the consistent heuristic.'''
    return consistent_heuristic(P , goal ) // t


def heuristic_2( P , goal ):
    '''Manhattan distance between P and the goal.'''
    return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )


def key( start , i , goal , g_function ):
    '''Priority of `start` in open list i: g-value plus weighted heuristic.'''
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
def do_something( back_pointer , goal , start ):
    '''Print the grid with the found path and exit.'''
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = """*"""

    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[(n - 1) - i][j] = """#"""

    grid[0][(n - 1)] = """-"""
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = """-"""
        x = back_pointer[x]
    grid[(n - 1)][0] = """-"""

    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=""" """ )
                print("""<-- End position""" , end=""" """ )
            else:
                print(grid[i][j] , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
    print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    x = back_pointer[goal]
    while x != start:
        print(x , end=""" """ )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid( p ):
    '''Return True if point p lies inside the n x n grid.'''
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state( s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    '''Relax the four neighbours of s and push them onto the open lists.'''
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("""inf""" )

                if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                        if neighbours not in close_list_inad:
                            for var in range(1 , n_heuristic ):
                                if key(neighbours , var , goal , g_function ) <= W2 * key(
                                    neighbours , 0 , goal , g_function ):
                                    open_list[j].put(
                                        neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
    '''Build the set of obstacle cells used by the demo grid.'''
    some_list = []
    for x in range(1 , 5 ):
        for y in range(1 , 6 ):
            some_list.append((x, y) )
    for x in range(15 , 20 ):
        some_list.append((x, 17) )
    for x in range(10 , 19 ):
        for y in range(1 , 15 ):
            some_list.append((x, y) )
    # L block
    for x in range(1 , 4 ):
        for y in range(12 , 19 ):
            some_list.append((x, y) )
    for x in range(3 , 13 ):
        for y in range(16 , 19 ):
            some_list.append((x, y) )
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star( start , goal , n_heuristic ):
    '''Run multi-heuristic A* from start to goal with n_heuristic open lists.'''
    g_function = {start: 0, goal: float("""inf""" )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("""inf""" ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("""inf""" ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_anchor.append(get_s )
    print("""No path found to goal""" )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
            if (j, i) in blocks:
                print("""#""" , end=""" """ )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("""*""" , end=""" """ )
                else:
                    print("""-""" , end=""" """ )
            else:
                print("""*""" , end=""" """ )
            if (j, i) == (n - 1, n - 1):
                print("""<-- End position""" , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 53
| 0
|
def solution( length = 50 ):
    '''Project Euler 114: count the ways a row of `length` units can be filled with
    red blocks of minimum length three, separated by at least one black square.'''
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
        ways_number[row_length] += 1
    return ways_number[length]
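# Sanity check from the Project Euler problem statement: a row of length 7
# admits exactly seventeen arrangements, i.e. solution(7) == 17.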
if __name__ == "__main__":
print(F'''{solution() = }''')
| 701
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config( model_name ):
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
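# Illustrative usage of rename_key above (hypothetical entry):
#   sd = {"backbone.patch_embed.norm.bias": some_tensor}
#   rename_key(sd, "backbone.patch_embed.norm.bias",
#              "model.pixel_level_module.encoder.model.embeddings.norm.bias")
#   -> the value is now stored under the new key and the old key is gone.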
def read_in_swin_q_k_v( state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
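# Shape sketch (not part of the conversion script) of the fused-QKV split above:
# a (3 * dim, dim) weight is sliced row-wise into equal query / key / value
# thirds, and the (3 * dim,) bias is sliced the same way.
def _split_qkv_example(in_proj_weight, in_proj_bias, dim):
    query = (in_proj_weight[:dim, :], in_proj_bias[:dim])
    key = (in_proj_weight[dim : dim * 2, :], in_proj_bias[dim : dim * 2])
    value = (in_proj_weight[-dim:, :], in_proj_bias[-dim:])
    return query, key, value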
def read_in_decoder_q_k_v( state_dict , config ):
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[: hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[: hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , """rb""" ) as f:
        data = pickle.load(f )
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="""pt""" )
    outputs = model(**inputs )
    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(F'nielsr/{model_name}' )
        image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
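# Example invocation of this script (the script filename and local paths below
# are hypothetical):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade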
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
__snake_case :Union[str, Any] = ShapEImgaImgPipeline
__snake_case :Tuple = ['image']
__snake_case :Optional[Any] = ['image']
__snake_case :Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__snake_case :int = False
@property
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return 8
@property
def _a ( self : str ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowercase = CLIPVisionModel(__UpperCamelCase )
return model
@property
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCamelCase , do_normalize=__UpperCamelCase , do_resize=__UpperCamelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def _a ( self : Dict ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowercase = PriorTransformer(**__UpperCamelCase )
return model
@property
def _a ( self : List[str] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowercase = ShapERenderer(**__UpperCamelCase )
return model
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.dummy_prior
__lowercase = self.dummy_image_encoder
__lowercase = self.dummy_image_processor
__lowercase = self.dummy_renderer
__lowercase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__UpperCamelCase , clip_sample=__UpperCamelCase , clip_sample_range=1.0 , )
__lowercase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _a ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(__UpperCamelCase )
else:
__lowercase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
__lowercase = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
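    # Note on the branch above: device-bound `torch.Generator` objects are not
    # supported on mps, so the mps path falls back to the global
    # `torch.manual_seed` while other devices seed a per-device generator.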
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = """cpu"""
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**__UpperCamelCase )
__lowercase = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__lowercase = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
__lowercase = output.images[0]
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowercase = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> str:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = torch_device == """cpu"""
__lowercase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , )
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**__UpperCamelCase )
__lowercase = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__lowercase = 1
__lowercase = 2
__lowercase = self.get_dummy_inputs(__UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
__lowercase = batch_size * [inputs[key]]
__lowercase = pipe(**__UpperCamelCase , num_images_per_prompt=__UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : int ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowercase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowercase = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__lowercase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
__lowercase = pipe(
__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
from math import sqrt
def is_prime( number ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth = 10_001 ):
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
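# Illustrative check: the 6th prime is 13, so solution(6) should return 13.
# assert solution(6) == 13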
if __name__ == "__main__":
print(F'''{solution() = }''')
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
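# Illustrative consumer of the lazy module set up above (hypothetical import
# site): `from transformers.models.vivit import VivitConfig` resolves the
# attribute lazily, importing `configuration_vivit` only on first access.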
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x ):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
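# Illustrative behaviour of the helper above:
#   to_atuple(3)      -> (3, 3)   # scalars are duplicated into a pair
#   to_atuple((4, 6)) -> (4, 6)   # iterables are returned unchanged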
@require_tf
class TFVisionTextDualEncoderMixin :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
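    # Worked example of the patch arithmetic above (illustrative numbers):
    # image_size=(224, 224) and patch_size=(16, 16) give (224 // 16) ** 2 = 196
    # patches; adding the [CLS] token yields seq_len = 197.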
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self : Tuple , vision_config : Tuple , text_config : List[str] ) -> List[str]:
        """simple docstring"""
        vision_model = TFViTModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class __UpperCamelCase ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self : Tuple ) -> Any:
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self : Tuple , vision_config : List[str] , text_config : Dict ) -> int:
        """simple docstring"""
        vision_model = TFDeiTModel(vision_config , name="""vision_model""" )
        text_model = TFRobertaModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self : Tuple ) -> str:
        """simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class __UpperCamelCase ( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self : int ) -> Union[str, Any]:
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self : Optional[int] , vision_config : Dict , text_config : Union[str, Any] ) -> Dict:
        """simple docstring"""
        vision_model = TFCLIPVisionModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
from __future__ import annotations
import math
def prime_sieve( num ):
    '''simple docstring'''
    if num <= 0:
        msg = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start to False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
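# Illustrative check: prime_sieve(10) -> [2, 3, 5, 7]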
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    def __init__( self : int , list_of_points : list[tuple[float, float]] ) -> None:
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self : Tuple , t : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self : List[str] , t : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self : Optional[int] , step_size : float = 0.01 ) -> None:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
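# Worked example (illustrative): for the degree-1 curve through (1, 2) and
# (3, 5), the basis at t = 0.5 is [0.5, 0.5], so the curve point is (2.0, 3.5).
# assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)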
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
__snake_case :Optional[int] = DebertaTokenizer
__snake_case :Optional[int] = True
__snake_case :Any = DebertaTokenizerFast
def _a ( self : List[str] ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
__lowercase = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase = {'''unk_token''': '''[UNK]'''}
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
def _a ( self : str , **_lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def _a ( self : Optional[Any] , _lowerCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = '''lower newer'''
__lowercase = '''lower newer'''
return input_text, output_text
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = '''lower newer'''
__lowercase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowercase = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("""Hello""" , """World""" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["""token_type_ids"""] , expected_token_type_ids )
@slow
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=__snake_case )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__snake_case )
__lowercase = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
__lowercase = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
__lowercase = tokenizer.build_inputs_with_special_tokens(__snake_case )
__lowercase = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowercase = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowercase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__lowercase = tokenizer(__snake_case , padding=__snake_case )
__lowercase = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
__lowercase = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowercase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
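    # Illustrative contract of the helper above: with the defaults in this
    # tester, inputs_dict["pixel_values"] has shape (13, 3, 64, 64), i.e.
    # (batch_size, num_channels, image_size, image_size).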
@require_tf
class __UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
        __lowercase = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
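            # this branch mirrors the common tests for LSH-attention models (e.g. Reformer),
            # where attention maps are produced once per hash round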
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = "cvt"
def __init__( self : Optional[Any] , _lowerCAmelCase : Optional[Any]=3 , _lowerCAmelCase : int=[7, 3, 3] , _lowerCAmelCase : Optional[Any]=[4, 2, 2] , _lowerCAmelCase : int=[2, 1, 1] , _lowerCAmelCase : str=[64, 192, 384] , _lowerCAmelCase : Union[str, Any]=[1, 3, 6] , _lowerCAmelCase : Dict=[1, 2, 10] , _lowerCAmelCase : List[str]=[4.0, 4.0, 4.0] , _lowerCAmelCase : int=[0.0, 0.0, 0.0] , _lowerCAmelCase : Optional[Any]=[0.0, 0.0, 0.0] , _lowerCAmelCase : Any=[0.0, 0.0, 0.1] , _lowerCAmelCase : int=[True, True, True] , _lowerCAmelCase : Any=[False, False, True] , _lowerCAmelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , _lowerCAmelCase : int=[3, 3, 3] , _lowerCAmelCase : List[Any]=[1, 1, 1] , _lowerCAmelCase : List[Any]=[2, 2, 2] , _lowerCAmelCase : Any=[1, 1, 1] , _lowerCAmelCase : List[str]=[1, 1, 1] , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Optional[int]=1e-12 , **_lowerCAmelCase : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = num_channels
__lowercase = patch_sizes
__lowercase = patch_stride
__lowercase = patch_padding
__lowercase = embed_dim
__lowercase = num_heads
__lowercase = depth
__lowercase = mlp_ratio
__lowercase = attention_drop_rate
__lowercase = drop_rate
__lowercase = drop_path_rate
__lowercase = qkv_bias
__lowercase = cls_token
__lowercase = qkv_projection_method
__lowercase = kernel_qkv
__lowercase = padding_kv
__lowercase = stride_kv
__lowercase = padding_q
__lowercase = stride_q
__lowercase = initializer_range
__lowercase = layer_norm_eps
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
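            # each line is "<token> <count>", optionally suffixed with the "#fairseq:overwrite" flag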
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
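    # fairseq BPE marks word continuations with a trailing "@@", while the HF
    # tokenizer marks word endings with "</w>"; convert between the two schemes
    # and re-key the vocab accordingly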
    __lowercase = dict((re.sub(r"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , k ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = 2
__lowercase = []
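    # trial division: divide out each factor i while it divides n; whatever
    # remains above 1 at the end is itself prime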
while i * i <= n:
if n % i:
i += 1
else:
n //= i
            factors.append(i )
if n > 1:
        factors.append(n )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
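        # the precomputed embeddings replace the text prompt; the output must match the prompt run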
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
from __future__ import annotations
import numpy as np
def snake_case ( lowerCamelCase ):
'''simple docstring'''
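    # Doolittle's method: fill row i of L (below the diagonal) and row i of U
    # together, reusing the entries computed on earlier iterations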
    __lowercase , __lowercase = np.shape(table )
    if rows != columns:
        __lowercase = (
            """\'table\' has to be of square shaped array but got a """
            F'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg )
    __lowercase = np.zeros((rows, columns) )
    __lowercase = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            __lowercase = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            __lowercase = (table[i][j] - total) / upper[j][j]
        __lowercase = 1
        for j in range(i , columns ):
            __lowercase = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            __lowercase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
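    # keep spaces and the first occurrence of each alphabetic character; drop repeats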
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
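    # e.g. the key "MANGO" maps A->M, B->A, C->N, D->G, E->O, then fills the
    # remaining slots with the rest of the alphabet, skipping letters the key used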
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
def snake_case ( ):
'''simple docstring'''
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = 1
__lowercase = 2
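    # for n = p1**e1 * p2**e2 * ..., the divisor count is (e1 + 1) * (e2 + 1) * ...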
while i * i <= n:
__lowercase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def snake_case ( ):
'''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : int = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class __UpperCamelCase ( PretrainedConfig ):
__snake_case :int = 'swin2sr'
__snake_case :Dict = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , _lowerCAmelCase : Optional[int]=64 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=180 , _lowerCAmelCase : Tuple=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase : Optional[int]=[6, 6, 6, 6, 6, 6] , _lowerCAmelCase : Tuple=8 , _lowerCAmelCase : List[Any]=2.0 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : int=2 , _lowerCAmelCase : List[str]=1.0 , _lowerCAmelCase : Tuple="1conv" , _lowerCAmelCase : List[Any]="pixelshuffle" , **_lowerCAmelCase : str , ) -> str:
"""simple docstring"""
        super().__init__(**kwargs )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
        __lowercase = len(depths )
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = upscale
__lowercase = img_range
__lowercase = resi_connection
__lowercase = upsampler
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
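        # with variance_type="learned_range" the predicted variance interpolates
        # (in log space) between the fixed small and large posterior variances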
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict = False, False, False
@dataclass
class __UpperCamelCase :
__snake_case :Optional[int] = None
__snake_case :bool = True
__snake_case :bool = True
__snake_case :Optional[str] = None
# Automatically constructed
__snake_case :ClassVar[str] = "dict"
__snake_case :ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    __snake_case :str = field(default='Audio' , init=False , repr=False )
def __call__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.pa_type
def _a ( self : Tuple , _lowerCAmelCase : Optional[Any] ) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install \'soundfile\'.""" ) from err
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__lowercase = BytesIO()
sf.write(_lowerCamelCase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a \'sampling_rate\' in Audio object""" )
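                # raw int16 PCM samples are rescaled by 1/32767 into [-1.0, 1.0] floats,
                # then re-encoded as WAV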
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__lowercase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
__lowercase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_2767
__lowercase = BytesIO(bytes() )
sf.write(_lowerCamelCase , _lowerCamelCase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Dict = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
__lowercase , __lowercase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install \'librosa\' and \'soundfile\'.""" ) from err
__lowercase = xsplitext(_lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
__lowercase = token_per_repo_id or {}
__lowercase = path.split("""::""" )[-1]
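            # for streamed datasets the path is a "::"-chained URL; the last segment
            # is the source URL on the Hub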
try:
__lowercase = string_to_dict(_lowerCamelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__lowercase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__lowercase = None
with xopen(_lowerCamelCase , """rb""" , use_auth_token=_lowerCamelCase ) as f:
__lowercase , __lowercase = sf.read(_lowerCamelCase )
else:
__lowercase , __lowercase = sf.read(_lowerCamelCase )
__lowercase = array.T
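        # soundfile returns (frames, channels); transpose to (channels, frames) for librosa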
if self.mono:
__lowercase = librosa.to_mono(_lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__lowercase = librosa.resample(_lowerCamelCase , orig_sr=_lowerCamelCase , target_sr=self.sampling_rate )
__lowercase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _a ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _a ( self : Tuple , _lowerCAmelCase : Any ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
__lowercase = pa.array([None] * len(_lowerCamelCase ) , type=pa.binary() )
__lowercase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowercase = pa.array([None] * len(_lowerCamelCase ) , type=pa.string() )
__lowercase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
__lowercase = pa.array([Audio().encode_example(_lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__lowercase = storage.field("""bytes""" )
else:
__lowercase = pa.array([None] * len(_lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__lowercase = storage.field("""path""" )
else:
__lowercase = pa.array([None] * len(_lowerCamelCase ) , type=pa.string() )
__lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_lowerCamelCase , self.pa_type )
def _a ( self : List[Any] , _lowerCAmelCase : Union[str, Any] ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_lowerCAmelCase : Optional[Any] ):
with xopen(_lowerCamelCase , """rb""" ) as f:
__lowercase = f.read()
return bytes_
__lowercase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowercase = pa.array(
[os.path.basename(_lowerCamelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCamelCase , self.pa_type )
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
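

# Note: GLPNImageProcessor resizes by rounding each spatial dimension down to the nearest
# multiple of `size_divisor` (32 here), which is exactly what the modulo checks above assert.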
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
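

# Usage sketch (hypothetical paths): with --train_data_files "data/shard_*.txt" the training set
# becomes a ConcatDataset with one dataset per matched shard; evaluation always reads the single
# file given by --eval_data_file, e.g. get_dataset(data_args, tokenizer=tokenizer, evaluate=True).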
def main():
    # See all possible arguments in src/transformers/training_args.py
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without facing test set submission or postprocessing
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
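

# Example invocation (sketch; most flags besides --task and --gpus come from lightning_base's
# add_generic_args and BaseTransformer.add_model_specific_args):
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --do_train --do_predict --gpus 1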
if __name__ == "__main__":
main()
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of side lengths and determines whether a two-dimensional
    polygon with those side lengths can exist, i.e. whether the longest side
    is strictly shorter than the sum of all the other sides.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
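

# This is the generalized polygon inequality: a polygon with the given side lengths exists
# iff max(sides) < sum(other sides); sorting makes that single comparison sufficient.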
if __name__ == "__main__":
import doctest
doctest.testmod()
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = """Encode this sequence."""
__lowercase = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A , __A )
__lowercase = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A , __A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase = tokenizer.encode(__A , add_special_tokens=__A )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A , __A )
# Testing spaces after special tokens
__lowercase = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space
__lowercase = tokenizer.convert_tokens_to_ids(__A )
__lowercase = """Encode <mask> sequence"""
__lowercase = """Encode <mask>sequence"""
__lowercase = tokenizer.encode(__A )
__lowercase = encoded.index(__A )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A , __A )
__lowercase = tokenizer.encode(__A )
__lowercase = encoded.index(__A )
__lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A , __A )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase = self.rust_tokenizer_class.from_pretrained(__A , **__A )
__lowercase = self.tokenizer_class.from_pretrained(__A , **__A )
__lowercase = """A, <mask> AllenNLP sentence."""
__lowercase = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
__lowercase = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __A )
self.assertEqual(post_processor_state["""trim_offsets"""] , __A )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase = F'{text_of_1_token} {text_of_1_token}'
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , )
__lowercase = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , )
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
__lowercase = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
__lowercase = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
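

# Note: the offset-mapping checks above cover the four combinations of `add_prefix_space`
# and `trim_offsets`; with trim_offsets=False a token's span keeps its leading space, which
# is why several expected offsets start one character earlier than their trimmed variants.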
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
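

# Worked example for [1, 2, 3]: (max_including, max_excluding) evolves
# (1, 0) -> (2, 1) -> (4, 2), so the result is max(4, 2) = 4 (choosing 1 and 3).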
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
__lowercase = []
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
__lowercase , __lowercase = prepare_semantic_single_inputs()
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
__lowercase , __lowercase = prepare_semantic_batch_inputs()
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def _a ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__lowercase , __lowercase = prepare_semantic_single_inputs()
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
__lowercase = True
__lowercase = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
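

# Note: with `do_reduce_labels=True` the background label 0 is remapped to the ignore index
# 255 and all remaining labels are shifted down by one, which is why the assertions above
# allow label values up to 255 rather than 150.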
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the checkpoint's weights into our RobertaPreLayerNorm structure.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
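
# Example invocation (sketch; the script file name and output path are hypothetical):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 --pytorch_dump_folder_path ./converted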
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
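

# Minimal usage sketch (assumes the checkpoint exposes a preprocessor config on the Hub):
#   config_dict = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#   print(config_dict.get("feature_extractor_type"))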
class AutoFeatureExtractor:
    r"""
    Generic feature extractor class that is instantiated as one of the feature extractor classes of the library
    when created with the [`AutoFeatureExtractor.from_pretrained`] class method. It cannot be instantiated
    directly with `__init__()`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """
        Register a new feature extractor for this class.
        """
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
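

# Typical entry point (sketch):
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# The class is resolved from `feature_extractor_type` in the checkpoint's preprocessor
# config, falling back to the model config and finally to FEATURE_EXTRACTOR_MAPPING.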
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    Build a 2D Gabor filter kernel of size `ksize` x `ksize` with orientation
    `theta` (in degrees), wavelength `lambd`, spatial aspect ratio `gamma`,
    standard deviation `sigma` and phase offset `psi`.
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1

    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(
                2 * np.pi * _x / lambd + psi
            )

    return gabor
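

# Each kernel entry follows the real part of the Gabor function
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel coordinates rotated by `theta` degrees about the kernel center.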
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)