| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare that to taking a full-size model, reducing its layers and
# emb dimensions to the minimum, but keeping the full vocab + merges files, which leads to ~3MB
# in total for all files. The latter approach is implemented by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))

    tokenizer = FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
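#
# A minimal sketch (illustrative; assumes the upload above succeeded under the
# same repo name) of how the resulting checkpoint is then consumed inside a test:
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#   outputs = model(**tokenizer(["Making tiny model"], return_tensors="pt"))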
| 333 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
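
# A minimal sketch (not part of the test class above) of the save/load
# round-trip that `check_over_configs` and `check_over_forward` verify;
# the "./ipndm_config" path is hypothetical:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1_000)
#   scheduler.save_config("./ipndm_config")
#   restored = IPNDMScheduler.from_pretrained("./ipndm_config")
#   assert restored.config.num_train_timesteps == 1_000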
| 333 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")

def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)

def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)

def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer

@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-xls-r-1b",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/mbart-large-50-one-to-many-mmt",
        type=str,
        help="Path to hf decoder checkpoint config",
    )
    parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers")
    parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
    parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
    parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
    parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
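
# Illustrative invocation (the script filename and all paths below are hypothetical):
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./mbart50/dict.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-converted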
| 285 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]

def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)

class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """
        Register this command to argparse so it's available for the datasets-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 285 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}

class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
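
# A minimal usage sketch (illustrative, not part of the original module):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])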
| 305 |
def or_gate(input_1: int, input_2: int) -> int:
    """
    Calculate OR of the input values.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """
    Tests the or_gate function.
    """
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 305 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )

class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)

@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
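
# Note: the @slow-decorated tests above are skipped by default; the standard
# transformers test-suite convention to run them is (file path indicative):
#
#   RUN_SLOW=1 python -m pytest tests/models/distilbert/test_modeling_flax_distilbert.py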
| 320 |
"""simple docstring"""
UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_UpperCamelCase )
__UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data )
__UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6)
else:
__UpperCAmelCase : List[str] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( _UpperCamelCase : str ) -> bytes:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ):
__UpperCAmelCase : Tuple = (
"""argument should be a bytes-like object or ASCII string, """
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_UpperCamelCase , _UpperCamelCase ):
try:
__UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__UpperCAmelCase : str = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__UpperCAmelCase : List[str] = encoded_data[:-padding]
__UpperCAmelCase : int = """""".join(
bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__UpperCAmelCase : Optional[Any] = """""".join(
bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
__UpperCAmelCase : List[Any] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_UpperCamelCase ) , 8 )
]
return bytes(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
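
# A quick round-trip sanity check against the standard library (illustrative,
# not part of the original module):
#
#   import base64
#   assert base64_encode(b"Hello World!") == base64.b64encode(b"Hello World!")
#   assert base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"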
| 320 | 1 |
def hexagonal_numbers(length: int) -> list[int]:
    """
    Returns a list of the first ``length`` hexagonal numbers, h(n) = n * (2 * n - 1).
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
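
# For reference, h(n) = n * (2 * n - 1) for n = 0..4 gives:
#   hexagonal_numbers(5) == [0, 1, 6, 15, 28]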
| 29 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable

def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Treats curve as a collection of linear lines and sums the area of the
    trapezium shape they form.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area

if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
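
# For reference: because each trapezoid's area is taken as an absolute value,
# the printed approximations converge to the unsigned area
#   integral over [-5, 5] of |x^3 + x^2| dx = 1376/12 + 198 ≈ 312.67,
# not to the signed integral 250/3 ≈ 83.33.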
| 113 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 351 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}

def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji

class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
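
# A minimal usage sketch (illustrative, not part of the original module):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   text = tokenizer.decode(ids)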

class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 232 | 0 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]

class InstagramUser:
    """
    Class to crawl Instagram user information.
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __UpperCAmelCase ( A : str = "github" ) -> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
UpperCAmelCase_ : List[str] = InstagramUser(A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : List[str] = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 304 |
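# An offline sketch of the JSON-slicing trick used by extract_user_profile
# above: the profile data sits inside an inline <script> as a JavaScript
# assignment, so the code finds the opening brace and drops the trailing
# semicolon before calling json.loads. The payload and the wrapper name
# below are synthetic, chosen only for illustration.
import json

script_text = 'window._sharedData = {"config": {"user": {"username": "github"}}};'
start = script_text.find('{"config"')
payload = json.loads(script_text[start:-1])  # [:-1] strips the trailing ";"
assert payload["config"]["user"]["username"] == "github"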
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A ( cls : int ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )
def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
UpperCAmelCase_ : str = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase_ : int = False
return models_are_equal
@require_flax
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Any = FlaxBertModel(_A )
UpperCAmelCase_ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) )
with self.assertRaises(_A ):
UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
UpperCAmelCase_ : str = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
with self.assertRaises(_A ):
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertTrue(check_models_equal(_A , _A ) )
def A ( self : int ) -> Optional[int]:
UpperCAmelCase_ : int = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = '''bert'''
UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_A ):
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
self.assertIsNotNone(_A )
| 304 | 1 |
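# A dependency-light sketch of the parameter-comparison pattern the tests
# above rely on: flatten two nested parameter trees into {path: array} dicts,
# then check that every leaf agrees within a tolerance. flatten_tree is a
# stand-in for flax.traverse_util.flatten_dict, not the real implementation.
import numpy as np

def flatten_tree(tree, prefix=()):
    flat = {}
    for key, value in tree.items():
        path = prefix + (key,)
        if isinstance(value, dict):
            flat.update(flatten_tree(value, path))
        else:
            flat[path] = value
    return flat

params_a = {"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}
params_b = {"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}
flat_a, flat_b = flatten_tree(params_a), flatten_tree(params_b)
assert flat_a.keys() == flat_b.keys()
for key in flat_a:
    assert np.abs(flat_a[key] - flat_b[key]).sum() < 1e-3, key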
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__: Optional[Any] = logging.get_logger(__name__)
__magic_name__: int = "▁"
__magic_name__: Dict = {"vocab_file": "prophetnet.tokenizer"}
__magic_name__: Optional[int] = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
__magic_name__: List[str] = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
__magic_name__: Optional[Any] = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Optional[Any] = collections.OrderedDict()
with open(_A, """r""", encoding="""utf-8""" ) as reader:
__magic_name__ : Tuple = reader.readlines()
for index, token in enumerate(_A ):
__magic_name__ : Optional[Any] = token.rstrip("""\n""" )
__magic_name__ : Any = index
return vocab
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
__magic_name__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
__magic_name__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__magic_name__ : Tuple = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
__magic_name__ : Optional[int] = F'[unused{i}]'
__magic_name__ : Tuple = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__magic_name__ : Any = 12
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowerCAmelCase__ )
def __getstate__( self ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.__dict__.copy()
__magic_name__ : Optional[Any] = None
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Optional[int] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : List[str] = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return ([0] * len(lowerCAmelCase__ )) + [1]
return ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : int = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __magic_name__ ( self ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[Any] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__ : Dict = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
__magic_name__ : Optional[Any] = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Optional[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__magic_name__ : Dict = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 138 |
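# A toy model of the fairseq-offset bookkeeping in the tokenizer above: a few
# special tokens own the low ids, every SentencePiece id is shifted up by a
# fixed offset, and an spm id of 0 falls back to [UNK]. The tiny dict below
# stands in for a real SentencePieceProcessor; its entries are illustrative.
special_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
fairseq_offset = 12
spm_piece_to_id = {"▁hello": 5, "▁world": 9}  # 0 is reserved for unknown pieces

def token_to_id(token):
    if token in special_tokens_to_ids:
        return special_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    return spm_id + fairseq_offset if spm_id else special_tokens_to_ids["[UNK]"]

assert token_to_id("[SEP]") == 2
assert token_to_id("▁hello") == 17  # 5 + 12
assert token_to_id("▁unseen") == 3  # unknown pieces map to [UNK]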
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__magic_name__: Union[str, Any] = False
@skip_mps
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Optional[int] = StableDiffusionAttendAndExcitePipeline
lowercase__ : Tuple = False
lowercase__ : List[str] = TEXT_TO_IMAGE_PARAMS
lowercase__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
lowercase__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __magic_name__ ( cls ) -> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
@classmethod
def __magic_name__ ( cls ) -> Optional[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , )
__magic_name__ : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
__magic_name__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__magic_name__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__magic_name__ : Any = CLIPTextModel(lowerCAmelCase__ )
__magic_name__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__magic_name__ : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[Any]:
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__magic_name__ : int = torch.manual_seed(lowerCAmelCase__ )
else:
__magic_name__ : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def __magic_name__ ( self ) -> Dict:
__magic_name__ : int = """cpu"""
__magic_name__ : Union[str, Any] = self.get_dummy_components()
__magic_name__ : int = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__magic_name__ : str = pipe(**lowerCAmelCase__ ).images
__magic_name__ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__magic_name__ : Dict = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
__magic_name__ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase__ , 1e-3 )
def __magic_name__ ( self ) -> List[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __magic_name__ ( self ) -> Union[str, Any]:
# NOTE: larger batch sizes cause this test to time out, so only smaller batches are tested
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self ) -> int:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __magic_name__ ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __magic_name__ ( self ) -> Any:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __magic_name__ ( self ) -> Dict:
super().test_save_load_local(expected_max_difference=5e-4 )
def __magic_name__ ( self ) -> List[Any]:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase ):
@classmethod
def __magic_name__ ( cls ) -> Optional[int]:
super().setUpClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
@classmethod
def __magic_name__ ( cls ) -> List[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = torch.manual_seed(51 )
__magic_name__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
__magic_name__ : List[str] = """a painting of an elephant with glasses"""
__magic_name__ : Any = [5, 7]
__magic_name__ : List[Any] = pipe(
prompt=lowerCAmelCase__ , token_indices=lowerCAmelCase__ , guidance_scale=7.5 , generator=lowerCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
__magic_name__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 138 | 1 |
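# The pipeline tests above (and several below) repeat the same seeded-generator
# idiom with a special case for Apple's MPS backend, presumably because a
# torch.Generator could not be created on the "mps" device when these tests
# were written. A hedged sketch of that helper, factored out:
import torch

def seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # falls back to the global CPU generator
    return torch.Generator(device=device).manual_seed(seed)

generator = seeded_generator("cpu", seed=0)
assert torch.randn(1, generator=generator).shape == (1,)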
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =VideoToVideoSDPipeline
UpperCamelCase__ : str =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
UpperCamelCase__ : str =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
UpperCamelCase__ : List[Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCamelCase__ : List[Any] =False
# No `output_type`.
UpperCamelCase__ : List[str] =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : str =UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
__UpperCamelCase : List[Any] =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__UpperCamelCase : List[str] =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : Dict =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : List[Any] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : str =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Optional[int] =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Union[str, Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Dict ={
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : str =self.get_dummy_components()
__UpperCamelCase : Any =VideoToVideoSDPipeline(**lowerCamelCase__ )
__UpperCamelCase : str =sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Tuple =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Dict ='np'
__UpperCamelCase : List[Any] =sd_pipe(**lowerCamelCase__ ).frames
__UpperCamelCase : Optional[int] =frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__UpperCamelCase : Union[str, Any] =np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowercase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __lowercase ( self ):
"""simple docstring"""
pass
def __lowercase ( self ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__UpperCamelCase : str =torch.Generator(device='cpu' ).manual_seed(0 )
__UpperCamelCase : Union[str, Any] =torch.randn((1, 10, 3, 1024, 576) , generator=lowerCamelCase__ )
__UpperCamelCase : List[Any] =video.to('cuda' )
__UpperCamelCase : Union[str, Any] ='Spiderman is surfing'
__UpperCamelCase : Tuple =pipe(lowerCamelCase__ , video=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=3 , output_type='pt' ).frames
__UpperCamelCase : List[str] =np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 |
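# The video inputs above are 5-D tensors laid out as (batch, num_frames,
# channels, height, width), e.g. floats_tensor((1, 3, 3, 32, 32)) and
# torch.randn((1, 10, 3, 1024, 576)). A small numpy sketch of slicing out a
# single frame and converting it to the channel-last layout used for images:
import numpy as np

video = np.random.rand(1, 10, 3, 64, 64).astype(np.float32)  # B, F, C, H, W
batch, num_frames, channels, height, width = video.shape
frame = video[0, 0]                    # one frame: (C, H, W)
frame_hwc = frame.transpose(1, 2, 0)   # channel-last: (H, W, C)
assert frame_hwc.shape == (height, width, channels)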
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BioGptTokenizer
__lowerCAmelCase = False
def A (self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCAmelCase ) )
def A (self : Tuple , _lowerCAmelCase : List[str] ):
A = """lower newer"""
A = """lower newer"""
return input_text, output_text
def A (self : List[Any] ):
A = BioGptTokenizer(self.vocab_file , self.merges_file )
A = """lower"""
A = ["""low""", """er</w>"""]
A = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
A = tokens + ["""<unk>"""]
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
@slow
def A (self : Union[str, Any] ):
A = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
A = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 258 | 0 |
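# A worked trace of why the toy vocabulary above turns "lower" into
# ["low", "er</w>"]: BPE starts from characters (with "</w>" marking the word
# end) and applies merges in priority order. The mini-implementation below is
# a simplified single pass over the merge list (real BPE re-scans for the
# best-ranked pair after every merge), but it produces the same result here;
# it is a sketch, not the BioGptTokenizer internals.
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # from "l o", "lo w", "e r</w>"

def toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for left, right in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == left and symbols[i + 1] == right:
                symbols[i : i + 2] = [left + right]
            else:
                i += 1
    return symbols

assert toy_bpe("lower", merges) == ["low", "er</w>"]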
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ : str =logging.get_logger(__name__)
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 162 |
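# The short file above is a complete instance of a common deprecation shim:
# keep the old class name importable, but make it a thin subclass of the
# replacement that warns on construction. A generic, library-free sketch of
# the same pattern (the class names here are made up for illustration):
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    processor = OldProcessor(size=128)
assert processor.size == 128 and caught[0].category is FutureWarning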
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths below assume you run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCAmelCase__ : Optional[Any] ='src/transformers'
lowerCAmelCase__ : int ='docs/source/en/tasks'
def a__ ( A__, A__, A__ ):
with open(A__, 'r', encoding='utf-8', newline='\n' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE_ : Any = 0
while not lines[start_index].startswith(A__ ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : int = start_index
while not lines[end_index].startswith(A__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ : Any =direct_transformers_import(TRANSFORMERS_PATH)
lowerCAmelCase__ : Dict ={
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowerCAmelCase__ : Union[str, Any] ={
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TASK_GUIDE_TO_MODELS[task_guide]
SCREAMING_SNAKE_CASE_ : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A__, set() )
SCREAMING_SNAKE_CASE_ : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def a__ ( A__, A__=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = _find_text_in_file(
filename=os.path.join(A__, A__ ), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
SCREAMING_SNAKE_CASE_ : str = get_model_list_for_task(A__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(A__, A__ ), 'w', encoding='utf-8', newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
' to fix this.' )
if __name__ == "__main__":
lowerCAmelCase__ : int =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ : Union[str, Any] =parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 162 | 1 |
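# An offline sketch of the _find_text_in_file contract above: return the text
# between a start prompt and an end prompt, plus the indices needed to splice
# a regenerated block back into place. This version works on a list of lines
# instead of a file and omits the original's blank-line trimming.
def find_text_between(lines, start_prompt, end_prompt):
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1  # first line after the start prompt
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index

lines = ["intro\n", "<!--start-->\n", "old list\n", "<!--end-->\n", "outro\n"]
text, start, end = find_text_between(lines, "<!--start-->", "<!--end-->")
assert text == "old list\n"
patched = lines[:start] + ["new list\n"] + lines[end:]
assert "".join(patched) == "intro\n<!--start-->\nnew list\n<!--end-->\noutro\n"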
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a (UpperCAmelCase__ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :int = KandinskyInpaintPipeline
_SCREAMING_SNAKE_CASE :Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_SCREAMING_SNAKE_CASE :Optional[int] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_SCREAMING_SNAKE_CASE :Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_SCREAMING_SNAKE_CASE :Optional[Any] = False
@property
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def _a ( self ) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self ) -> Tuple:
"""simple docstring"""
return 100
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _a ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
SCREAMING_SNAKE_CASE__ : Any = MultilingualCLIP(lowercase_ )
SCREAMING_SNAKE_CASE__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def _a ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''in_channels''': 9,
# out_channels is double in_channels because the model predicts both the mean and the variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(**lowercase_ )
return model
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_unet
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE__ : List[Any] = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowercase_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _a ( self , _a , _a=0 ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase_ )
# create init_image
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(np.uinta(lowercase_ ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
SCREAMING_SNAKE_CASE__ : List[Any] = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : Any = 0
if str(lowercase_ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(lowercase_ )
else:
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
SCREAMING_SNAKE_CASE__ : Dict = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : int = self.pipeline_class(**lowercase_ )
SCREAMING_SNAKE_CASE__ : str = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**self.get_dummy_inputs(lowercase_ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : str = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : str = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _a ( self ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
SCREAMING_SNAKE_CASE__ : Tuple = np.ones((768, 768) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : Tuple = '''a hat'''
SCREAMING_SNAKE_CASE__ : List[str] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
SCREAMING_SNAKE_CASE__ : str = pipeline(
lowercase_ , image=lowercase_ , mask_image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
| 132 |
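# In the inpainting inputs above, the mask is a float array the size of the
# image; np.ones creates it and the obfuscated assignment that follows appears
# to have originally zeroed out a sub-region, splitting pixels into a kept
# area and an area to repaint (which value means which depends on the
# pipeline's mask convention). A sketch with an illustrative 16x16 hole:
import numpy as np

mask = np.ones((64, 64), dtype=np.float32)
mask[:16, :16] = 0.0  # mark the top-left corner as the edited region
assert mask.sum() == 64 * 64 - 16 * 16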
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ : Dict = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ : List[str] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = GPTaTokenizer
def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : Dict="<|endoftext|>" , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''add_bos_token''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type'''))
SCREAMING_SNAKE_CASE_ : str = add_prefix_space
SCREAMING_SNAKE_CASE_ : Dict = pre_tok_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space
def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''is_split_into_words''' , lowercase_)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.get('''is_split_into_words''' , lowercase_)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_)
return tuple(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : "Conversation"):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id])
if len(lowercase_) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :]
return input_ids
| 91 | 0 |
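# A plain-Python sketch of the conversation encoding at the end of the
# tokenizer above: each turn is encoded, an EOS id is appended after it, and
# the concatenation is truncated from the left so only the most recent
# model_max_length ids survive. encode() and the constants are toy stand-ins.
EOS_ID = 50256
MODEL_MAX_LENGTH = 8

def encode(text):
    return [ord(ch) for ch in text]  # toy "tokenizer": one id per character

def build_conversation_input_ids(turns):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [EOS_ID])
    if len(input_ids) > MODEL_MAX_LENGTH:
        input_ids = input_ids[-MODEL_MAX_LENGTH:]
    return input_ids

ids = build_conversation_input_ids(["hi", "hello"])
assert len(ids) == 8 and ids[-1] == EOS_ID  # left-truncated to the newest ids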
'''simple docstring'''
def __lowerCamelCase ( __snake_case : list[int], __snake_case : int ) -> Any:
"""simple docstring"""
A__ : List[str] =len(__snake_case )
A__ : Dict =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# a sum of zero can always be formed by taking no elements, hence True
for i in range(arr_len + 1 ):
A__ : Optional[int] =True
# a non-zero sum cannot be formed from an empty set of values, hence False
for i in range(1, required_sum + 1 ):
A__ : Optional[Any] =False
for i in range(1, arr_len + 1 ):
for j in range(1, required_sum + 1 ):
if arr[i - 1] > j:
A__ : Tuple =subset[i - 1][j]
if arr[i - 1] <= j:
A__ : Tuple =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
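# A self-contained, readably-named restatement of the subset-sum DP above,
# with a worked example: subset[i][j] answers "can some subset of the first i
# values sum to j?". With arr = [3, 34, 4, 12, 5, 2], the sum 9 is reachable
# (4 + 5) but 30 is not (without 34, the remaining values only total 26).
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    n = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j]  # option 1: skip arr[i - 1]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i][j] or subset[i - 1][j - arr[i - 1]]
    return subset[n][required_sum]

assert is_subset_sum([3, 34, 4, 12, 5, 2], 9) is True
assert is_subset_sum([3, 34, 4, 12, 5, 2], 30) is False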
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = XLMRobertaTokenizer
__snake_case = XLMRobertaTokenizerFast
__snake_case = True
__snake_case = True
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Any =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : Union[str, Any] ="""<pad>"""
A__ : Any =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
A__ : int =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCAmelCase_ ) , 10_02 )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
A__ : List[Any] =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
A__ : Tuple =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ : Optional[int] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : Optional[int] =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A__ : Dict =(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Union[str, Any] =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Optional[Any] =tempfile.mkdtemp()
A__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Check that it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ : List[str] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
A__ : Any =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=True
A__ : List[str] =tempfile.mkdtemp()
A__ : List[str] =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Check that it saves the same files
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
A__ : str =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=False
A__ : List[str] =tempfile.mkdtemp()
A__ : Dict =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ : Optional[int] =tokenizer_r.from_pretrained(lowerCAmelCase_ )
A__ : str =tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
@cached_property
def lowercase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase_ , f.name )
A__ : Dict =XLMRobertaTokenizer(f.name , keep_accents=lowerCAmelCase_ )
A__ : Optional[Any] =pickle.dumps(lowerCAmelCase_ )
pickle.loads(lowerCAmelCase_ )
    def test_rust_and_python_full_tokenizers(self):
        """The fast (Rust) and slow (Python) tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
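# Outside the test harness, the same parity check can be run by hand — a sketch assuming
# the public "xlm-roberta-base" checkpoint is reachable:
from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)  # identical ids including special tokens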
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
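# The commented-out ids above record a real divergence from fairseq: fairseq splits the
# literal string "<unk>" into pieces, while the HF tokenizer maps it to the unk token id.
# A quick way to observe this — a sketch assuming the "xlm-roberta-base" checkpoint:
from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tok.encode("tokenized to <unk>, such as saoneuhaoesuth")
assert tok.unk_token_id in ids  # the literal "<unk>" collapses to the unk id (3)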
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A__ : List[Any] ={"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `A__` above holds the expected encoding (kept under `fmt: off` to preserve its layout)
        self.tokenizer_integration_test_util(
            expected_encoding=A__, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3"
        )
| 136 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image],
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.dummy_cond_unet_upscale
lowerCamelCase_ = DDPMScheduler()
lowerCamelCase_ = DDIMScheduler(prediction_type="v_prediction" )
lowerCamelCase_ = self.dummy_vae
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase_ = unet.half()
lowerCamelCase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase , low_res_scheduler=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , max_noise_level=350 , )
lowerCamelCase_ = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
lowerCamelCase_ = "A painting of a squirrel eating a burger"
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sd_pipe(
[prompt] , image=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=2 , output_type="np" , ).images
lowerCamelCase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
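# The fp16 test above relies on a common diffusers pattern: cast the unet and text encoder
# to half precision but keep the VAE in fp32, since its decoder can overflow in fp16.
# A minimal sketch of the same idea for a full pipeline (the model id is the one used
# in the slow tests below; downloading it requires network access):
import torch
from diffusers import StableDiffusionUpscalePipeline

pipe = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler")
pipe.unet = pipe.unet.half()
pipe.text_encoder = pipe.text_encoder.half()
# pipe.vae deliberately stays in torch.float32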
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator,
            num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
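# The peak-VRAM accounting used by the assertion above generalizes to profiling any
# callable. A minimal helper sketch — assumes a CUDA device is available:
import torch

def peak_vram_gb(fn) -> float:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()  # run the workload being measured
    return torch.cuda.max_memory_allocated() / 10**9

# usage: peak_vram_gb(lambda: pipe(prompt="a cat", image=image, num_inference_steps=5))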
| 55 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a_ : str = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
a_ : int = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
a_ : Tuple = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
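# The wrapped rouge_score API can also be called directly, which is handy when debugging
# individual prediction/reference pairs — a minimal sketch using the same library:
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
result = scorer.score("hello there general kenobi", "hello there general kenobi")
print(result["rouge1"].fmeasure)  # 1.0 for identical strings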
| 55 | 1 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_UpperCAmelCase : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only) | 353 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm and print the safe execution order."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align the display of the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
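# A minimal usage sketch with the module-level test tables above — the output walks
# through the safe execution order process by process (guarded so imports stay quiet):
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)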
if __name__ == "__main__":
import doctest
doctest.testmod() | 232 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
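# The _LazyModule pattern above keeps `import transformers` cheap: torch-backed symbols
# are only materialized on first attribute access. A runnable miniature of the same idea
# (the "json" target module is purely illustrative):
import importlib
import types

class LazyDemo(types.ModuleType):
    def __getattr__(self, name):
        # resolve attributes from the real module only when first requested
        module = importlib.import_module("json")
        return getattr(module, name)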
| 345 |
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: repeatedly pop zero in-degree vertices; leftover vertices mean a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
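# With a cycle, no vertex ever reaches in-degree zero, so the processed count stays
# below the vertex count and the check above fires — a quick illustration:
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"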
| 345 | 1 |
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself (e.g. 5, 6, 25, 76)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
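    # Worked example — 76 is automorphic because 76² = 5776 ends in "76",
    # while 7 is not because 7² = 49 does not end in "7":
    assert is_automorphic_number(76) is True
    assert is_automorphic_number(7) is False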
| 132 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big- to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append its length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the preprocessed bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT over 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit int left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a bytes hex string."""
    # Convert to bit string, add padding and message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
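    # Cross-check against the standard library — assuming the reconstruction above is
    # faithful, both sides produce the same digest (md5_me returns the hex string as
    # bytes, hashlib as str):
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")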
| 132 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ), codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"], reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"], )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 17 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 | 0 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # `checkpoint_path` is a model name such as "tiny.en"; cache the official weights
        # first (the "original-weights" directory name is an assumption of this rewrite).
        model_bytes = _download(_MODELS[checkpoint_path], "original-weights")
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__A : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
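# Typical invocation and a quick load check — a sketch; the script filename and output
# directory below are illustrative, not part of the original source:
#
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en
#
#   from transformers import WhisperForConditionalGeneration
#   model = WhisperForConditionalGeneration.from_pretrained("./whisper-tiny.en")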
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
__UpperCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
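    # A quick verification that the converted weights load back cleanly — a sketch
    # reusing the dump path parsed above:
    converted_vae = AutoencoderKL.from_pretrained(args.dump_path)
    print(converted_vae.config)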
| 113 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(SCREAMING_SNAKE_CASE)  # the README body assembled in the f-string above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
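
# Quick sanity check for one generated card (a sketch; assumes the loop above
# has run and uses the same model_cards_dir layout):
sample_card = model_cards_dir / "facebook" / "wmt19-ru-en" / "README.md"
assert sample_card.read_text(encoding="utf-8").lstrip().startswith("---")  # YAML front-matter opener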
| 113 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
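
# Quick check of the downsampling math encoded by the property above: with the
# default strides (5, 2, 2, 2, 2, 2, 2) the feature encoder compresses the raw
# waveform by 5 * 2**6 = 320 samples per output frame.
if __name__ == "__main__":
    config = UniSpeechConfig()
    assert config.inputs_to_logits_ratio == 320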
| 361 |
class Graph:
    """Undirected, weighted graph stored as adjacency maps: vertex -> {neighbor: weight}."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph (no-op if it already exists)."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm assumes distinct edge weights; bump duplicates upward."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))  # drop the reversed duplicate
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight); each edge appears in both directions."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Computes a minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))  # drop the reversed duplicate
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
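
# Minimal usage sketch for the classes above: build a weighted 4-cycle, make the
# weights distinct (a precondition of Boruvka's algorithm), and extract the MST.
# The heaviest edge (4, 1, 4) is the one the algorithm must drop.
if __name__ == "__main__":
    g = Graph.build(edges=[(1, 2, 1), (2, 3, 2), (3, 4, 3), (4, 1, 4)])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)  # adjacency of the MST; total weight 1 + 2 + 3 = 6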
| 300 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
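
# What the guarded imports above buy a downstream user (a sketch assuming torch
# is installed): the real scheduler class resolves; without torch, the dummy
# object from dummy_pt_objects raises a helpful error only when instantiated.
if __name__ == "__main__":
    from diffusers.schedulers import DDIMScheduler

    scheduler = DDIMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=50)
    print(scheduler.timesteps[:5])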
| 298 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
if not batched:
__UpperCamelCase : List[str] = self.size["shortest_edge"]
__UpperCamelCase : Optional[int] = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size
else:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
__UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase )
if h < w:
__UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w
else:
__UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size
__UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size )
if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size:
__UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : Dict = newh * scale
__UpperCamelCase : Union[str, Any] = neww * scale
__UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
__UpperCamelCase , __UpperCamelCase : Optional[int] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) )
def a_ (self ) -> List[str]:
pass
def a_ (self ) -> List[Any]:
# Initialize image processor
__UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ (self ) -> Tuple:
# Initialize image processor
__UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a_ (self ) -> int:
# Initialize image processor
__UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
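
# Usage sketch of the processor under test (output shape is illustrative, not
# asserted): resize the shortest edge to `size`, then round spatial dims to
# multiples of size_divisor — the rule get_expected_values above mirrors.
if __name__ == "__main__":
    processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
    img = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    pixel_values = processor(images=img, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # spatial dims are multiples of 32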
| 298 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
def __init__( self : Dict , __magic_name__ : str , __magic_name__ : List[str]=3 , __magic_name__ : int=32 , __magic_name__ : List[Any]=3 , __magic_name__ : List[Any]=10 , __magic_name__ : int=[8, 16, 32, 64] , __magic_name__ : Tuple=[1, 1, 2, 1] , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]="relu" , __magic_name__ : str=3 , __magic_name__ : Any=None , __magic_name__ : Dict=["stage2", "stage3", "stage4"] , __magic_name__ : Any=[2, 3, 4] , __magic_name__ : Optional[Any]=1 , ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = parent
__snake_case : Any = batch_size
__snake_case : int = image_size
__snake_case : Optional[int] = num_channels
__snake_case : Tuple = embeddings_size
__snake_case : Tuple = hidden_sizes
__snake_case : int = depths
__snake_case : str = is_training
__snake_case : List[Any] = use_labels
__snake_case : str = hidden_act
__snake_case : Optional[int] = num_labels
__snake_case : List[str] = scope
__snake_case : Optional[int] = len(__magic_name__ )
__snake_case : Tuple = out_features
__snake_case : str = out_indices
__snake_case : int = num_groups
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowercase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = BitModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.num_labels
__snake_case : str = BitForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : str , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : List[str] = BitBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = model(__magic_name__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case : int = None
__snake_case : str = BitBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[str] = model(__magic_name__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowercase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = model_class(__magic_name__ )
__snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[Any] = [*signature.parameters.keys()]
__snake_case : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(config=__magic_name__ )
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ):
__snake_case : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__snake_case : Any = layer_type
__snake_case : Union[str, Any] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[int] = BitModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Dict:
"""simple docstring"""
__snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ )
__snake_case : Optional[int] = self.default_image_processor
__snake_case : Any = prepare_img()
__snake_case : Any = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : str = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
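
# Usage sketch of the backbone contract the tests above verify: one feature map
# per requested stage, with channel counts drawn from config.hidden_sizes.
if __name__ == "__main__":
    cfg = BitConfig(out_features=["stage2", "stage3", "stage4"])
    backbone = BitBackbone(cfg)
    with torch.no_grad():
        feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
    for fmap in feature_maps:
        print(fmap.shape)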
| 13 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=2 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Dict=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=36 , __magic_name__ : List[Any]=2 , __magic_name__ : str=4 , __magic_name__ : int=37 , __magic_name__ : int="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=6 , __magic_name__ : Dict=6 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=4 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=10_00 , ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = parent
__snake_case : Tuple = batch_size
__snake_case : List[Any] = num_channels
__snake_case : Dict = image_size
__snake_case : Tuple = patch_size
__snake_case : str = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : int = use_token_type_ids
__snake_case : str = use_labels
__snake_case : Dict = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[int] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : int = initializer_range
__snake_case : Optional[int] = coordinate_size
__snake_case : List[Any] = shape_size
__snake_case : Tuple = num_labels
__snake_case : List[Any] = num_choices
__snake_case : Optional[Any] = scope
__snake_case : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case : List[str] = text_seq_length
__snake_case : str = (image_size // patch_size) ** 2 + 1
__snake_case : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__snake_case : Optional[int] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : Union[str, Any] = bbox[i, j, 3]
__snake_case : Union[str, Any] = bbox[i, j, 1]
__snake_case : Any = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : Optional[Any] = bbox[i, j, 2]
__snake_case : Tuple = bbox[i, j, 0]
__snake_case : Optional[Any] = tmp_coordinate
__snake_case : Dict = tf.constant(__magic_name__ )
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.text_seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__snake_case : str = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__snake_case : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = TFLayoutLMvaModel(config=__magic_name__ )
# text + image
__snake_case : Optional[int] = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
__snake_case : List[str] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , )
__snake_case : Optional[int] = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__snake_case : Union[str, Any] = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__snake_case : Optional[Any] = model({"""pixel_values""": pixel_values} , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : str ) -> Any:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Optional[int] = TFLayoutLMvaForSequenceClassification(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : str = self.num_labels
__snake_case : str = TFLayoutLMvaForTokenClassification(config=__magic_name__ )
__snake_case : Tuple = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = 2
__snake_case : Dict = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ )
__snake_case : List[Any] = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = (
            self.prepare_config_and_inputs()
        )
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ):
# The number of elements in the loss should be the same as the number of elements in the label
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Any = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0]
]
__snake_case : List[str] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__snake_case : Any = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = prepared_for_class.pop("""input_ids""" )
__snake_case : Union[str, Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : str = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
__snake_case : str = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__snake_case : Dict = -1_00
__snake_case : str = tf.convert_to_tensor(__magic_name__ )
__snake_case : Optional[Any] = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__snake_case : Optional[int] = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
__snake_case : Tuple = model(__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__snake_case : str = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
# Get keys that were added with the _prepare_for_class function
__snake_case : Tuple = prepared_for_class.keys() - inputs_dict.keys()
__snake_case : Optional[Any] = inspect.signature(model.call ).parameters
__snake_case : int = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__snake_case : Union[str, Any] = {0: """input_ids"""}
for label_key in label_keys:
__snake_case : int = signature_names.index(__magic_name__ )
__snake_case : Optional[int] = label_key
__snake_case : Optional[int] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__snake_case : Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__snake_case : List[str] = prepared_for_class[value]
__snake_case : str = tuple(__magic_name__ )
# Send to model
__snake_case : List[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = TFLayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : Dict = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
__snake_case : str = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values
__snake_case : Tuple = tf.constant([[1, 2]] )
__snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__snake_case : List[Any] = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
# verify the logits
__snake_case : List[str] = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
__snake_case : Tuple = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
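
# Standalone sketch of the bbox sanitation performed in prepare_config_and_inputs
# above: LayoutLM-style boxes are (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1, so
# swap each coordinate pair where the ordering is violated (vectorized variant).
def make_bboxes_legal(bbox: np.ndarray) -> np.ndarray:
    low = np.minimum(bbox[..., :2], bbox[..., 2:])
    high = np.maximum(bbox[..., :2], bbox[..., 2:])
    return np.concatenate([low, high], axis=-1)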
| 13 | 1 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 162 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs()
A_ = pipe(**UpperCamelCase__ ).images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A_ = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def snake_case_ ( self ) -> List[Any]:
        '''Prompt embeddings passed explicitly must reproduce the plain-prompt output.'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt" )]
        text_inputs = pipe.tokenizer(
            prompt , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="np" , )
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_b = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
    def snake_case_ ( self ) -> List[str]:
        '''Negative-prompt embeddings passed explicitly must match the string form.'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt" )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="np" , )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs["prompt_embeds"] , inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs )
        image_slice_b = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = ort.SessionOptions()
A_ = False
return options
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
# using the PNDM scheduler by default
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
A_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """open neural network exchange"""
A_ = np.random.RandomState(0 )
A_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """open neural network exchange"""
A_ = np.random.RandomState(0 )
A_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="""np""" )
A_ = output.images
A_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = 0
def test_callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
A_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
A_ = latents[0, -3:, -3:, -1]
A_ = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
A_ = False
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = """Andromeda galaxy in a bottle"""
A_ = np.random.RandomState(0 )
pipe(
prompt=UpperCamelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert pipe.safety_checker is None
A_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
A_ = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A_ = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 162 | 1 |
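The fixed-slice checks above all share one call pattern; as a standalone sketch (the tiny test checkpoint and two-step schedule are purely for speed, so the output is noise):

import numpy as np
from diffusers import OnnxStableDiffusionPipeline

# ONNX pipelines run through onnxruntime, so the device is picked via a provider string.
pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)
generator = np.random.RandomState(0)  # NumPy RNG, not torch.Generator, for ONNX pipelines
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=2,
    guidance_scale=7.5,
    output_type="np",
).images[0]
print(image.shape)  # (height, width, 3) array of floats in [0, 1]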
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
UpperCamelCase__ : List[Any] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
UpperCamelCase__ : Optional[Any] = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """Return a byte -> printable unicode character map, so BPE can operate on raw bytes."""
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word ) -> set:
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
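# Worked example for get_pairs, handy when reading the BPE merge loop below:
# >>> get_pairs(("l", "o", "w"))
# {('l', 'o'), ('o', 'w')}
# bpe() repeatedly merges the lowest-ranked of these pairs until none remain.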
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self : Tuple ,__lowerCamelCase : int ,__lowerCamelCase : Tuple ,__lowerCamelCase : int="replace" ,__lowerCamelCase : Dict="<s>" ,__lowerCamelCase : str="</s>" ,__lowerCamelCase : Any="</s>" ,__lowerCamelCase : Dict="<s>" ,__lowerCamelCase : Tuple="<unk>" ,__lowerCamelCase : List[str]="<pad>" ,__lowerCamelCase : str="<mask>" ,__lowerCamelCase : int=False ,**__lowerCamelCase : Optional[int] ,):
'''simple docstring'''
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else bos_token
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else eos_token
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else sep_token
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else cls_token
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,**__lowerCamelCase ,)
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle:
a = json.load(__lowerCamelCase )
a = {v: k for k, v in self.encoder.items()}
a = errors # how to handle errors in decoding
a = bytes_to_unicode()
a = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase ,encoding='''utf-8''' ) as merges_handle:
a = merges_handle.read().split('''\n''' )[1:-1]
a = [tuple(merge.split() ) for merge in bpe_merges]
a = dict(zip(__lowerCamelCase ,range(len(__lowerCamelCase ) ) ) )
a = {}
a = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : List[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
a = tuple(__lowerCamelCase )
a = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
a = min(__lowerCamelCase ,key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a = bigram
a = []
a = 0
while i < len(__lowerCamelCase ):
try:
a = word.index(__lowerCamelCase ,__lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a = tuple(__lowerCamelCase )
a = new_word
if len(__lowerCamelCase ) == 1:
break
else:
a = get_pairs(__lowerCamelCase )
a = ''' '''.join(__lowerCamelCase )
a = word
return word
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
a = []
for token in re.findall(self.pat ,__lowerCamelCase ):
a = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : Tuple ):
'''simple docstring'''
return self.encoder.get(__lowerCamelCase ,self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : int ):
'''simple docstring'''
a = ''''''.join(__lowerCamelCase )
a = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' ,errors=self.errors )
return text
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file ,'w' ,encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a = [self.cls_token_id]
a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ,__lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase ,token_ids_a=__lowerCamelCase ,already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[Any] ,__lowerCamelCase : List[str]=False ,**__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
a = kwargs.pop('''add_prefix_space''' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
a = ''' ''' + text
return (text, kwargs)
| 330 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig( PretrainedConfig ):
    model_type = 'luke'
    def __init__( self ,vocab_size=50267 ,entity_vocab_size=500000 ,hidden_size=768 ,entity_emb_size=256 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,use_entity_aware_attention=True ,classifier_dropout=None ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 330 | 1 |
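A short usage sketch for the configuration above (names follow the upstream `LukeConfig`; the overridden sizes are illustrative):

from transformers import LukeConfig, LukeModel

# Shrink the entity table for a toy model; all other defaults are kept.
config = LukeConfig(entity_vocab_size=10_000, entity_emb_size=128)
model = LukeModel(config)  # randomly initialized; use from_pretrained for real weights
print(config.use_entity_aware_attention)  # True by default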
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs ):
    '''Wrapper around `tqdm.auto.tqdm` that can restrict the bar to the local main process.'''
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
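# Usage sketch: identical to tqdm.auto.tqdm except for the leading flag, so under
# `accelerate launch` only local rank 0 draws the bar (`dataloader` is illustrative):
#
#   for batch in tqdm(True, dataloader, desc="training"):
#       ...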
| 68 |
"""simple docstring"""
def actual_power(a: int , b: int ) -> int:
    '''Exponentiation by squaring for a non-negative exponent b.'''
    if b == 0:
        return 1
    half = actual_power(a , int(b / 2 ) )
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half


def power(a: int , b: int ) -> float:
    '''Handle negative exponents: a ** -b == 1 / a ** b.'''
    if b < 0:
        return 1 / actual_power(a , -b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
| 136 | 0 |
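A few checks for the routine above; because the exponent is halved on every call, power(3, 10) takes four recursive steps instead of ten multiplications:

assert power(3, 10) == 59049   # 3**10 via repeated squaring
assert power(2, -3) == 0.125   # negative exponents invert the result
assert power(7, 0) == 1        # base case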
from __future__ import annotations
from random import choice
def random_pivot(lst ):
    """Pick a random element of the list to use as the pivot."""
    return choice(lst )


def kth_number(lst: list[int] , k: int ) -> int:
    """Return the k-th smallest element (1-indexed) via quickselect.

    Assumes the elements are distinct: values equal to the pivot (other than
    the pivot itself) are dropped by the partition below.
    """
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # the k-th smallest is among the elements bigger than the pivot
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # the k-th smallest is among the smaller elements
    else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
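# Worked example (elements must be distinct, rank k is 1-indexed):
# >>> kth_number([2, 1, 3, 4, 5], 3)
# 3
# Expected linear time overall, since each call recurses into only one partition.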
| 167 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class A ( unittest.TestCase ):
def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=True, UpperCamelCase__=1 / 255, UpperCamelCase__=True, ):
"""simple docstring"""
lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_pad
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        """Compute the (height, width) the processor should produce for the given inputs."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item : item[0] )[0]
            expected_width = max(expected_values, key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_rescale''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_pad''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor()
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowerCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, masks_path=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify masks
lowerCAmelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), UpperCamelCase__ )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
| 167 | 1 |
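The slow tests above drive the full COCO detection preprocessing; a compact sketch of the same call, assuming local copies of the fixture files they reference:

import json
from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
    target = json.loads(f.read())

processor = DeformableDetrImageProcessor()
encoding = processor(
    images=image,
    annotations={"image_id": 39769, "annotations": target},
    return_tensors="pt",
)
print(encoding["pixel_values"].shape)        # e.g. torch.Size([1, 3, 800, 1066])
print(encoding["labels"][0]["boxes"].shape)  # normalized (cx, cy, w, h) boxes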
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A: Dict = logging.get_logger(__name__)
A: List[Any] = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
A: Optional[Any] = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
A: str = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def load_vocab_and_emoji(vocab_file , emoji_file ):
    """Load the vocabulary file and the emoji file into dictionaries."""
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
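# Vocab file format (as parsed above): one entry per line; a comma-separated line
# such as "こんにちは,コンニチハ" maps every variant to the same token id in `vocab`,
# while `ids_to_tokens` keeps the full variant list for that id.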
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
return len(self.raw_vocab )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
return self.subword_tokenizer.tokenize(_SCREAMING_SNAKE_CASE , clean=self.do_clean_text )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(_SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict = "".join(_SCREAMING_SNAKE_CASE ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(_SCREAMING_SNAKE_CASE ) > self.model_max_length:
UpperCAmelCase : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
    def __init__( self , vocab , ids_to_tokens , emoji ) -> Tuple:
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.ids_to_tokens )
    def clean_text( self , content ) -> Tuple:
        '''Replace URLs, e-mails, phone numbers, dates and prices with special tokens.'''
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" )
UpperCAmelCase : Any = text.replace(""" """ , """<SP>""" )
UpperCAmelCase : List[Any] = text.replace("""\r\n""" , """<BR>""" )
UpperCAmelCase : List[Any] = text.replace("""\n""" , """<BR>""" )
UpperCAmelCase : Union[str, Any] = text.replace("""\r""" , """<BR>""" )
UpperCAmelCase : Any = text.replace("""\t""" , """<TAB>""" )
UpperCAmelCase : Optional[Any] = text.replace("""—""" , """ー""" )
UpperCAmelCase : Tuple = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase : Any = text.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if clean:
UpperCAmelCase : Optional[Any] = self.clean_text(_SCREAMING_SNAKE_CASE )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC_2_A_1 and c <= 0xC_2_B_F)
or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3)
or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F)
or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2)
):
return True
return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F:
return True
return False
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Tuple = []
while pos < len(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[int] = min(len(_SCREAMING_SNAKE_CASE ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase : Optional[int] = [] # (token_id, token, pos)
for e in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , -1 ):
UpperCAmelCase : Union[str, Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_SCREAMING_SNAKE_CASE ) > 2:
UpperCAmelCase : str = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
# the smallest token_id is adopted
UpperCAmelCase : Any = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[0] )[0]
result.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = e
else:
UpperCAmelCase : Tuple = pos + 1
UpperCAmelCase : Union[str, Any] = text[pos:end]
if check_simbol(_SCREAMING_SNAKE_CASE ):
result.append("""<KIGOU>""" )
elif checkuae(_SCREAMING_SNAKE_CASE ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
UpperCAmelCase : Tuple = end
return result
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="\n" ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = []
UpperCAmelCase : List[str] = []
UpperCAmelCase : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_SCREAMING_SNAKE_CASE ) > 0:
words.append(bytearray(_SCREAMING_SNAKE_CASE ).decode("""utf-8""" , errors="""replace""" ) )
UpperCAmelCase : Dict = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(_SCREAMING_SNAKE_CASE )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
words.append(bytearray(_SCREAMING_SNAKE_CASE ).decode("""utf-8""" , errors="""replace""" ) )
UpperCAmelCase : str = "".join(_SCREAMING_SNAKE_CASE )
return text
| 109 |
import math
def main() -> None:
    '''simple docstring'''
    message = input("Enter message: ")
    key = int(input(F'Enter key [2-{len(message) - 1}]: '))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key , message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key , message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F'Output:\n{text + "|"}')
def encrypt_message(key : int , message : str) -> str:
    '''Columnar transposition: column i collects message[i], message[i + key], ...'''
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key : int , message : str) -> str:
    '''Invert encrypt_message by refilling the transposition grid row by row.'''
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 232 | 0 |
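A round-trip check for the cipher above (key and message are illustrative); decryption rebuilds the same grid, so the plaintext comes back exactly:

message = "Common sense is not so common."
key = 8
assert decrypt_message(key, encrypt_message(key, message)) == message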
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a : Tuple = logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ :
lowercase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
lowercase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowercase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _lowercase( self ) -> Dict:
UpperCAmelCase : List[str] = self.task_name.lower()
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'train'
lowercase = 'dev'
lowercase = 'test'
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 42
lowercase = 42
lowercase = 42
def __init__( self , A , A , A = None , A = Split.train , A = None , ) -> Any:
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , A , )
UpperCAmelCase : Dict = args
UpperCAmelCase : Tuple = glue_processors[args.task_name]()
UpperCAmelCase : Dict = glue_output_modes[args.task_name]
if isinstance(A , A ):
try:
UpperCAmelCase : Any = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
UpperCAmelCase : Tuple = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
UpperCAmelCase : str = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = label_list[2], label_list[1]
UpperCAmelCase : Tuple = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : Optional[Any] = cached_features_file + """.lock"""
with FileLock(A ):
if os.path.exists(A ) and not args.overwrite_cache:
UpperCAmelCase : int = time.time()
UpperCAmelCase : str = torch.load(A )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
UpperCAmelCase : str = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCAmelCase : Dict = self.processor.get_test_examples(args.data_dir )
else:
UpperCAmelCase : Tuple = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCAmelCase : int = examples[:limit_length]
UpperCAmelCase : str = glue_convert_examples_to_features(
A , A , max_length=args.max_seq_length , label_list=A , output_mode=self.output_mode , )
UpperCAmelCase : Union[str, Any] = time.time()
torch.save(self.features , A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , A ) -> InputFeatures:
return self.features[i]
def _lowercase( self ) -> Optional[Any]:
return self.label_list
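    # Usage sketch, assuming the upstream names GlueDataTrainingArguments and
    # GlueDataset for the two classes above (paths are illustrative):
    #
    #   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
    #   dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.dev)
    #   print(len(dataset), dataset.get_labels())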
| 338 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCamelCase_ :
lowercase = MBartConfig
lowercase = {}
lowercase = 'gelu'
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=20 , A=2 , A=1 , A=0 , ) -> Optional[int]:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : str = is_training
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Optional[int] = intermediate_size
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : Optional[Any] = eos_token_id
UpperCAmelCase : List[str] = pad_token_id
UpperCAmelCase : List[Any] = bos_token_id
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : List[Any] = prepare_mbart_inputs_dict(A , A , A )
return config, inputs_dict
def _lowercase( self , A , A ) -> List[str]:
UpperCAmelCase : List[str] = TFMBartModel(config=A ).get_decoder()
UpperCAmelCase : int = inputs_dict["""input_ids"""]
UpperCAmelCase : str = input_ids[:1, :]
UpperCAmelCase : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase : List[str] = inputs_dict["""head_mask"""]
UpperCAmelCase : List[Any] = 1
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , head_mask=A , use_cache=A )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = outputs.to_tuple()
UpperCAmelCase : int = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> int:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
    def setUp( self ) -> Optional[Any]:
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config( self ) -> Optional[int]:
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest( unittest.TestCase ):
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
    @cached_property
    def tokenizer( self ) -> Any:
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ) -> List[Any]:
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ) -> Any:
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text( self , **tokenizer_kwargs ) -> Optional[Any]:
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro( self ) -> List[Any]:
        self._assert_generated_batch_equal_expected()
| 338 | 1 |
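# A minimal, self-contained sketch of the padding-mask trick used by
# `prepare_mbart_inputs_dict` above: positions equal to `pad_token_id` become 0,
# all other positions become 1. The tensor values are invented for the example
# and it assumes TensorFlow is installed.
import tensorflow as tf
pad_token_id = 1
input_ids = tf.constant([[5, 7, 9, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # -> [[1 1 1 0 0]]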
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(__lowerCAmelCase ) -> Union[str, Any]:
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , __lowerCAmelCase )
    return [m.group(0 ) for m in matches]
def get_frameworks_table() -> pd.DataFrame:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {"""model_type""": all_models}
    data["""pytorch"""] = [pt_models[t] for t in all_models]
    data["""tensorflow"""] = [tf_models[t] for t in all_models]
    data["""flax"""] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = """AutoProcessor"""
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = """AutoTokenizer"""
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = """AutoFeatureExtractor"""
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = """AutoTokenizer"""
    data["""processor"""] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table(table ) -> dict:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
        auto_classes = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata(token , commit_sha ) -> None:
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            """model_class""": model_classes,
            """pipeline_tag""": [table[m][0] for m in model_classes],
            """auto_class""": [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , """frameworks.json""" ) )
        tags_dataset.to_json(os.path.join(tmp_dir , """pipeline_tags.json""" ) )
        if commit_sha is not None:
            commit_message = (
                F'''Update with commit {commit_sha}\n\nSee: '''
                F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
            )
        else:
            commit_message = """Update"""
        upload_folder(
            repo_id="""huggingface/transformers-metadata""" , folder_path=tmp_dir , repo_type="""dataset""" , token=token , commit_message=commit_message , )
def check_pipeline_tags() -> None:
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["""pt"""]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = """, """.join(missing )
        raise ValueError(
            """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
            F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
a :int = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
a :List[str] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 132 |
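# Usage sketch for the `camel_case_split` helper above: the regex splits a
# model class name at lower->upper and acronym boundaries, which is how the
# script walks a class name back to a known model prefix. The input below is
# a made-up example.
print(camel_case_split("BertForSequenceClassification"))
# -> ['Bert', 'For', 'Sequence', 'Classification']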
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
def _a ( self ) -> List[Any]:
"""simple docstring"""
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def _a ( self ) -> List[str]:
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def _a ( self ) -> int:
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 132 | 1 |
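# Minimal sketch of the pattern the test above exercises: running a callable
# under accelerate's CPU debug launcher. `my_training_fn` is a hypothetical
# stand-in for a real test entry point; `debug_launcher` is the accelerate API
# used in the test.
from accelerate import debug_launcher
def my_training_fn():
    print("running under the debug launcher")
debug_launcher(my_training_fn, num_processes=1)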
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ : str = logging.get_logger(__name__)
class Conversation:
    def __init__( self , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs=None , generated_responses=None ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid : uuid.UUID = conversation_id
        self.past_user_inputs : List[str] = past_user_inputs
        self.generated_responses : List[str] = generated_responses
        self.new_user_input : Optional[str] = text
def __eq__( self : Optional[int] , __lowerCamelCase : Optional[int] ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _A ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
UpperCamelCase :Union[str, Any] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
UpperCamelCase :str = text
def _A ( self : Dict ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase :Optional[int] = None
def _A ( self : Dict , __lowerCamelCase : List[Any] ):
self.generated_responses.append(SCREAMING_SNAKE_CASE_ )
def _A ( self : str ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ):
UpperCamelCase :List[Any] = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
UpperCamelCase :Dict = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"""\n        min_length_for_response (`int`, *optional*, defaults to 32):\n            The minimum length (in number of tokens) for a response.\n        minimum_tokens (`int`, *optional*, defaults to 10):\n            The minimum length of tokens to leave for a response.\n    """ , )
class ConversationalPipeline( Pipeline ):
def __init__( self : str , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ):
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase :str = self.tokenizer.eos_token
def _A ( self : int , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : List[Any] ):
UpperCamelCase :Optional[Any] = {}
UpperCamelCase :Optional[Any] = {}
UpperCamelCase :Tuple = {}
if min_length_for_response is not None:
UpperCamelCase :Union[str, Any] = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase :Tuple = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase :Optional[int] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase :Optional[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(SCREAMING_SNAKE_CASE_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=0 , **__lowerCamelCase : Dict ):
UpperCamelCase :Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , num_workers=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) == 1:
return outputs[0]
return outputs
def _A ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=32 ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
UpperCamelCase :int = self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase :Dict = self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE_ )
if self.framework == "pt":
UpperCamelCase :List[str] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase :Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _A ( self : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=10 , **__lowerCamelCase : Tuple ):
UpperCamelCase :Any = generate_kwargs.get("""max_length""" , self.model.config.max_length )
UpperCamelCase :Optional[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
UpperCamelCase :Any = max_length - minimum_tokens
UpperCamelCase :Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase :str = model_inputs["""attention_mask"""][:, -trim:]
UpperCamelCase :Union[str, Any] = model_inputs.pop("""conversation""" )
UpperCamelCase :int = max_length
UpperCamelCase :int = self.model.generate(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase :List[str] = 1
else:
UpperCamelCase :Optional[int] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _A ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : int=True ):
UpperCamelCase :Dict = model_outputs["""output_ids"""]
UpperCamelCase :Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :Dict = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(SCREAMING_SNAKE_CASE_ )
return conversation
def _A ( self : List[Any] , __lowerCamelCase : Dict ):
UpperCamelCase :Dict = self.tokenizer.eos_token_id
UpperCamelCase :Union[str, Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > self.tokenizer.model_max_length:
UpperCamelCase :Optional[int] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 355 |
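# Pure-Python sketch of the trimming rule in the conversational pipeline above:
# when the prompt would leave fewer than `minimum_tokens` slots under
# `max_length`, only the most recent `max_length - minimum_tokens` tokens are
# kept. The numbers are illustrative.
max_length, minimum_tokens = 10, 3
input_ids = list(range(12))  # 12 tokens: too long for the budget
if max_length - minimum_tokens < len(input_ids):
    trim = max_length - minimum_tokens
    input_ids = input_ids[-trim:]
assert len(input_ids) == 7  # 10 - 3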
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _A ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def _A ( self : int , __lowerCamelCase : Union[str, Any] ):
UpperCamelCase :str = """this is a test"""
UpperCamelCase :Dict = """this is a test"""
return input_text, output_text
def _A ( self : Tuple ):
UpperCamelCase :Optional[Any] = """<pad>"""
UpperCamelCase :Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(__lowerCamelCase ) , 30_001 )
def _A ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def _A ( self : str ):
# fmt: off
UpperCamelCase :Optional[int] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Any = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
UpperCamelCase :Optional[Any] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
UpperCamelCase :Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Dict ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Optional[Any] ):
pass
def _A ( self : Optional[int] ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Any ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : str ):
# fmt: off
UpperCamelCase :List[str] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :List[str] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
# fmt: off
UpperCamelCase :Optional[Any] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Dict = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :int = self.get_tokenizer()
UpperCamelCase :str = self.get_rust_tokenizer()
UpperCamelCase :Dict = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = self.get_rust_tokenizer()
UpperCamelCase :Tuple = tokenizer.encode(__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Optional[int] = """This is a test"""
UpperCamelCase :str = [13, 1, 4_398, 25, 21, 1_289]
UpperCamelCase :int = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :Any = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[Any] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# fmt: off
UpperCamelCase :Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
UpperCamelCase :Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCamelCase :Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[int] ):
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.encode("""sequence builders""" )
UpperCamelCase :Any = tokenizer.encode("""multi-sequence build""" )
UpperCamelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
UpperCamelCase :str = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
@slow
def _A ( self : List[Any] ):
# fmt: off
UpperCamelCase :Union[str, Any] = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 62 | 0 |
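# Hedged sketch of the slow/fast parity pattern the tests above exercise; the
# checkpoint name follows the integration test, and downloading it requires
# network access plus the sentencepiece/tokenizers extras.
from transformers import AutoTokenizer
slow_tok = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge", use_fast=False)
fast_tok = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge", use_fast=True)
text = "I was born in 92000, and this is falsé."
assert slow_tok.encode(text) == fast_tok.encode(text)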
from ....utils import logging
__a : Optional[int] = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ) -> Optional[int]:
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 210 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__lowerCAmelCase = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
'''simple docstring'''
assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def _lowerCamelCase ( ):
'''simple docstring'''
assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
__lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
__lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
| 57 | 0 |
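# Self-contained NumPy sketch of what `canny.gen_gaussian_kernel` computes in
# the tests above: a normalized 2-D Gaussian used to smooth an image before
# edge detection. This is a generic reimplementation for illustration, not
# the repository's exact code.
import numpy as np
def gaussian_kernel(size: int, sigma: float) -> np.ndarray:
    ax = np.arange(size) - (size - 1) / 2
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))
    return kernel / kernel.sum()
print(gaussian_kernel(9, 1.4).sum())  # ~1.0 after normalization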
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model( self ):
        '''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
        features = {
            'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 254 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
| 254 | 1 |
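# Usage sketch of the version-gating pattern in the helper above: compare the
# installed package's release tuple against a threshold before picking a code
# path. The version strings are illustrative.
from packaging import version
installed = version.parse("0.10.2")
if installed.release < version.parse("0.11.0").release:
    print("old hfh: quote the file path manually")
else:
    print("new hfh: the library quotes it for us")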
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
def __A ( self , **A__ ):
A__ : Dict = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**A__ )
return config
def __A ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A__ )
def __A ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def __A ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A__ )
def __A ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A__ )
def __A ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A__ )
def __A ( self ):
self.check_over_configs(thresholding=A__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A__ , prediction_type=A__ , sample_max_value=A__ , )
def __A ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __A ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=A__ )
def __A ( self ):
A__ : Tuple = self.scheduler_classes[0]
A__ : List[str] = self.get_scheduler_config()
A__ : List[str] = scheduler_class(**A__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def __A ( self ):
A__ : int = self.scheduler_classes[0]
A__ : List[str] = self.get_scheduler_config()
A__ : int = scheduler_class(**A__ )
A__ : Tuple = len(A__ )
A__ : List[str] = self.dummy_model()
A__ : Optional[Any] = self.dummy_sample_deter
A__ : List[str] = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
A__ : Tuple = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
A__ : Dict = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A__ : Optional[int] = pred_prev_sample
A__ : Tuple = torch.sum(torch.abs(A__ ) )
A__ : str = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __A ( self ):
A__ : Optional[int] = self.scheduler_classes[0]
A__ : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
A__ : List[str] = scheduler_class(**A__ )
A__ : int = len(A__ )
A__ : Dict = self.dummy_model()
A__ : str = self.dummy_sample_deter
A__ : Any = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
A__ : Optional[int] = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
A__ : Tuple = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A__ : List[str] = pred_prev_sample
A__ : Optional[Any] = torch.sum(torch.abs(A__ ) )
A__ : List[str] = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __A ( self ):
A__ : str = self.scheduler_classes[0]
A__ : Optional[Any] = self.get_scheduler_config()
A__ : Dict = scheduler_class(**A__ )
A__ : Optional[int] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A__ )
A__ : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(A__ ):
if i == len(A__ ) - 1:
A__ : str = -1
else:
A__ : List[str] = timesteps[i + 1]
A__ : Optional[int] = scheduler.previous_timestep(A__ )
A__ : List[str] = prev_t.item()
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Optional[Any] = self.scheduler_classes[0]
A__ : int = self.get_scheduler_config()
A__ : Tuple = scheduler_class(**A__ )
A__ : List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(A__ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=A__ )
def __A ( self ):
A__ : Any = self.scheduler_classes[0]
A__ : Union[str, Any] = self.get_scheduler_config()
A__ : Optional[int] = scheduler_class(**A__ )
A__ : Union[str, Any] = [100, 87, 50, 1, 0]
A__ : Optional[int] = len(A__ )
with self.assertRaises(A__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=A__ , timesteps=A__ )
def __A ( self ):
A__ : Union[str, Any] = self.scheduler_classes[0]
A__ : Optional[Any] = self.get_scheduler_config()
A__ : Optional[int] = scheduler_class(**A__ )
A__ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=A__ )
| 192 |
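# Pure-Python sketch of the `previous_timestep` bookkeeping tested above: with
# custom descending timesteps, each step's "previous" timestep is simply the
# next entry in the list, and the final step maps to -1.
timesteps = [100, 87, 50, 1, 0]
prev = {t: (timesteps[i + 1] if i + 1 < len(timesteps) else -1) for i, t in enumerate(timesteps)}
assert prev[100] == 87 and prev[0] == -1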
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ) -> None:
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Tuple = tmp_path / "cache"
A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
A_ : List[Any] = tmp_path / "cache"
A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : int = features.copy() if features else default_expected_features
A_ : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]:
A_ : Dict = tmp_path / "cache"
A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = parquet_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Optional[int] = [parquet_path]
A_ : Optional[int] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
A_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]:
if split:
A_ : Any = {split: parquet_path}
else:
A_ : Optional[Any] = "train"
A_ : str = {"train": parquet_path, "test": parquet_path}
A_ : Any = tmp_path / "cache"
A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
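# A minimal round-trip sketch of what the tests above exercise, using only the
# public `datasets` API; the scratch filename below is hypothetical and not part
# of the test suite.
def _parquet_roundtrip_sketch(tmp_path):
    from datasets import Dataset

    ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    out = str(tmp_path / "roundtrip.parquet")
    ds.to_parquet(out)  # serialize to a Parquet file on disk
    reloaded = Dataset.from_parquet(out)
    assert reloaded.features == ds.features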
| 300 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    """simple docstring"""

    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
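# Usage sketch for the template above (the label names are illustrative; assumes
# a dataset whose features carry a populated ClassLabel for the label column):
def _align_with_features_sketch():
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
    template = AudioClassification()
    # Replace the generic ClassLabel in the schema with the concrete one found in the dataset.
    aligned = template.align_with_features(features)
    assert aligned.label_schema["labels"].names == ["cat", "dog"]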
| 363 |
import random
def _partition(data: list, pivot) -> tuple:
    """Split data into elements less than, equal to, and greater than the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items (zero-based), or None if out of range."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
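# Worked example: the third-smallest element (index 2, zero-based) of the list
# below is 7; each recursive call keeps only the partition that can hold that rank.
if __name__ == "__main__":
    print(quick_select([2, 9, 7, 11, 3], 2))  # 7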
| 282 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowercase :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : List[Any]=32 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Optional[Any]=10 , lowerCAmelCase__ : Any=[8, 16, 32, 64] , lowerCAmelCase__ : Tuple=[1, 1, 2, 1] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Optional[int]="relu" , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : List[Any]=["stage2", "stage3", "stage4"] , lowerCAmelCase__ : List[str]=[2, 3, 4] , lowerCAmelCase__ : int=1 , ):
SCREAMING_SNAKE_CASE_: Tuple = parent
SCREAMING_SNAKE_CASE_: str = batch_size
SCREAMING_SNAKE_CASE_: List[Any] = image_size
SCREAMING_SNAKE_CASE_: Optional[int] = num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = embeddings_size
SCREAMING_SNAKE_CASE_: Tuple = hidden_sizes
SCREAMING_SNAKE_CASE_: str = depths
SCREAMING_SNAKE_CASE_: Dict = is_training
SCREAMING_SNAKE_CASE_: Tuple = use_labels
SCREAMING_SNAKE_CASE_: List[str] = hidden_act
SCREAMING_SNAKE_CASE_: Optional[int] = num_labels
SCREAMING_SNAKE_CASE_: Optional[int] = scope
SCREAMING_SNAKE_CASE_: Union[str, Any] = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = out_features
SCREAMING_SNAKE_CASE_: Any = out_indices
SCREAMING_SNAKE_CASE_: str = num_groups
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: int = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor([self.batch_size] , self.num_labels)
SCREAMING_SNAKE_CASE_: str = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: List[Any] = BitModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_: Tuple = BitForImageClassification(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = BitBackbone(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE_: Optional[Any] = None
SCREAMING_SNAKE_CASE_: Optional[int] = BitBackbone(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_UpperCAmelCase : Optional[Any] = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : int = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : str = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = BitModelTester(self)
SCREAMING_SNAKE_CASE_: Dict = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Dict):
return
@unittest.skip(reason="Bit does not output attentions")
def _SCREAMING_SNAKE_CASE ( self : Dict):
pass
@unittest.skip(reason="Bit does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : Any):
pass
@unittest.skip(reason="Bit does not support input and output embeddings")
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(config=lowerCAmelCase__)
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
def _SCREAMING_SNAKE_CASE ( self : List[str]):
def check_hidden_states_output(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[str] = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Union[str, Any] = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE_: Optional[Any] = layer_type
SCREAMING_SNAKE_CASE_: Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
@unittest.skip(reason="Bit does not use feedforward chunking")
def _SCREAMING_SNAKE_CASE ( self : str):
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: List[Any] = BitModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def A_ ( ):
SCREAMING_SNAKE_CASE_: List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE_: Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: int = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: int = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@require_torch
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : int = (BitBackbone,) if is_torch_available() else ()
_UpperCAmelCase : List[str] = BitConfig
_UpperCAmelCase : str = False
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = BitModelTester(self)
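# A hedged sketch of the backbone contract exercised above: with explicit
# out_features, BitBackbone returns one feature map per requested stage. The
# input size is illustrative and the weights are randomly initialized, so no
# values are pinned to a pretrained checkpoint.
def _bit_backbone_sketch():
    config = BitConfig(out_features=["stage2", "stage3", "stage4"])
    model = BitBackbone(config)
    model.eval()
    with torch.no_grad():
        outputs = model(torch.randn(1, 3, 32, 32))
    assert len(outputs.feature_maps) == len(config.out_features)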
| 13 |
class Node:
    """simple docstring"""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """simple docstring"""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step, repeated from idx downwards
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
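# Extra demonstration: repeatedly removing the root drains the heap in
# non-decreasing order of node values (reuses the demo nodes created above).
print("Min Heap - extraction order")
while not my_min_heap.is_empty():
    print(my_min_heap.remove())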
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
def get_results(output_dir) -> dict:
    results = {}
    path = os.path.join(output_dir, """all_results.json""")
    if os.path.exists(path):
        with open(path, """r""") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
lowerCamelCase : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
"""simple docstring"""
    def test_run_glue(self) -> None:
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A , """argv""" , A ):
snake_case : Tuple = time()
xla_spawn.main()
snake_case : Dict = time()
snake_case : Any = get_results(A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
    def test_trainer_tpu(self) -> None:
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
        """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
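# For reference, the first test above is roughly the following shell invocation
# (paths mirror the testargs; the exact location of xla_spawn.py may differ per checkout):
#   python xla_spawn.py --num_cores=8 \
#       examples/pytorch/text-classification/run_glue.py \
#       --model_name_or_path distilbert-base-uncased --do_train --do_eval --max_steps=10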
| 176 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""", out_features=["""stage1""", """stage2""", """stage3""", """stage4"""]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
snake_case : Dict = 847
snake_case : List[str] = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
snake_case : Union[str, Any] = 150
snake_case : List[Any] = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
snake_case : Union[str, Any] = 171
snake_case : int = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
snake_case : Optional[Any] = 133
snake_case : Optional[Any] = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
snake_case : Tuple = 19
snake_case : int = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
snake_case : int = 65
snake_case : Any = """mapillary-vistas-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case : Tuple = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
snake_case : Optional[Any] = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case : Optional[Any] = in_proj_weight[:dim, :]
snake_case : Optional[int] = in_proj_bias[: dim]
snake_case : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
snake_case : Tuple = in_proj_bias[
dim : dim * 2
]
snake_case : List[Any] = in_proj_weight[
-dim :, :
]
snake_case : Any = in_proj_bias[-dim :]
# fmt: on
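# The slicing above in miniature: a fused (3*dim, dim) qkv in-projection splits
# into equal query/key/value blocks along dim 0. Toy sizes, for illustration only.
def _qkv_split_sketch():
    dim = 4
    in_proj_weight = torch.randn(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)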
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
snake_case : Any = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
snake_case : Tuple = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case : Optional[int] = in_proj_weight[: hidden_size, :]
snake_case : Any = in_proj_bias[:config.hidden_size]
snake_case : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case : int = in_proj_bias[hidden_size : hidden_size * 2]
snake_case : Any = in_proj_weight[-hidden_size :, :]
snake_case : Union[str, Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
snake_case : Dict = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
snake_case : str = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case : Dict = in_proj_weight[: hidden_size, :]
snake_case : Dict = in_proj_bias[:config.hidden_size]
snake_case : List[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
snake_case : Tuple = in_proj_weight[-hidden_size :, :]
snake_case : str = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img() -> torch.Tensor:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, """rb""") as f:
        data = pickle.load(f)
    state_dict = data["""model"""]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
# update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="""pt""")

    outputs = model(**inputs)

    print("""Logits:""", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4)
    print("""Looks ok!""")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing model and image processor to the hub...""")
        model.push_to_hub(f"""nielsr/{model_name}""")
        image_processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCamelCase : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
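# Example invocation (the script filename below is illustrative; checkpoint paths
# are machine-specific):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade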
| 176 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a: Optional[Any] = 16
__a: Any = 32
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 ):
lowercase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : List[Any] = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : Dict = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
UpperCAmelCase , padding='''longest''' , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : str = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
lowercase__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a: Tuple = mocked_dataloaders # noqa: F811
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCAmelCase ) == "1":
lowercase__ : Optional[int] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowercase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
lowercase__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : int = config['''lr''']
lowercase__ : Optional[int] = int(config['''num_epochs'''] )
lowercase__ : Optional[Any] = int(config['''seed'''] )
lowercase__ : int = int(config['''batch_size'''] )
set_seed(UpperCAmelCase )
lowercase__ , lowercase__ : str = get_dataloaders(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase )
# Instantiate scheduler
lowercase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowercase__ : Optional[Any] = os.path.split(UpperCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCAmelCase , UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowercase__ : str = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowercase__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
lowercase__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(UpperCAmelCase ),
'''epoch''': epoch,
} , step=UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __UpperCamelCase ( ):
lowercase__ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCAmelCase , default=UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=UpperCAmelCase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
lowercase__ : str = parser.parse_args()
lowercase__ : Tuple = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
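# Typical ways to launch this script (the filename is illustrative; the flags match
# the parser defined above, and `accelerate launch` is the standard CLI entry point):
#   accelerate launch tracking_example.py --with_tracking --project_dir ./logs
#   python tracking_example.py --mixed_precision fp16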
| 198 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    '''simple docstring'''

    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
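# Why test_knapsack expects 220: with capacity 50, taking the weight-20 and
# weight-30 items (values 100 + 120) fills the sack exactly, beating any
# combination that includes the weight-10/value-60 item (at most 60 + 120 = 180).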
| 198 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    """simple docstring"""
    @require_torch
    def test_offline_mode(self):
        '''simple docstring'''
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn\'t access internet\")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
@require_torch
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : str = '\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n '
_a : int = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowercase_ )
BertModel.from_pretrained(lowercase_ )
BertTokenizer.from_pretrained(lowercase_ )
pipeline(task='fill-mask' ,model=lowercase_ )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        '''simple docstring'''
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        '''simple docstring'''
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        '''simple docstring'''
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
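

if __name__ == "__main__":
    # Standalone sketch of the technique used above (added for illustration,
    # not part of the original test file): run a snippet in a subprocess with
    # socket.socket monkey-patched so any network access raises, proving the
    # snippet never touches the network.
    import subprocess
    import sys

    mock = 'import socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n'
    run = 'print("no network needed")\n'
    result = subprocess.run([sys.executable, '-c', mock + run], capture_output=True)
    assert result.returncode == 0 and b'no network needed' in result.stdout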
| 368 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f'''{input_string} matches the given pattern {pattern}''')
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''')
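    # Optional cross-check (added for illustration, not in the original
    # script): for patterns built only from literals, '.', and '*', the DP
    # answer should agree with Python's built-in regex engine.
    import re

    for s, p in [("aab", "c*a*b"), ("ab", ".*"), ("aa", "a"), ("mississippi", "mis*is*p*.")]:
        assert match_pattern(s, p) == bool(re.fullmatch(p, s)), (s, p)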
| 5 | 0 |
"""simple docstring"""
class EditDistance:
    def __init__(self):
        """simple docstring"""
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int):
        """simple docstring"""
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str):
        """simple docstring"""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str):
        """simple docstring"""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()

    print()
    print(f'The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}')
    print(f'The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
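    # Deterministic sanity check (added for illustration; the original demo is
    # interactive): the classic pair "kitten" -> "sitting" needs 3 edits, and
    # both strategies agree.
    assert solver.min_dist_top_down("kitten", "sitting") == 3
    assert solver.min_dist_bottom_up("kitten", "sitting") == 3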
| 167 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_altclip': [
        'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AltCLIPConfig',
        'AltCLIPTextConfig',
        'AltCLIPVisionConfig',
    ],
    'processing_altclip': ['AltCLIPProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_altclip'] = [
        'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AltCLIPPreTrainedModel',
        'AltCLIPModel',
        'AltCLIPTextModel',
        'AltCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 167 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.txt',
    'merges_file': 'bpe.codes',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
        'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
    },
    'merges_file': {
        'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
        'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'vinai/phobert-base': 256,
    'vinai/phobert-large': 256,
}


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
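

# Example (illustrative, added to this excerpt): for the BPE merge loop below,
# get_pairs yields the adjacent symbol pairs of a word tuple:
#   >>> sorted(get_pairs(("l", "o", "w")))
#   [('l', 'o'), ('o', 'w')]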
class PhobertTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
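
    # Illustrative trace of the merge loop above (added; the rank table is a
    # hypothetical toy example): with merge ranks {("l", "o"): 0, ("lo", "w"): 1,
    # ("e", "r</w>"): 2}, bpe("lower") merges to ("low", "er</w>") and, after
    # joining with "@@ " and stripping the trailing "</w>", returns "low@@ er".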
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset')
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ')
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 68 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    '''simple docstring'''

    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional, and
    # repeated edges are ignored
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 1_0_0_0_0) + 1_0
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_0_2) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    '''simple docstring'''

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 1_0_0_0_0) + 1_0
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_0_2) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
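

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original
    # module): build a small directed triangle and run the traversals.
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)
    print('dfs:', g.dfs())  # [0, 1, 2]
    print('bfs:', g.bfs())  # [0, 1, 2]
    print('in degree of 0:', g.in_degree(0))  # 1, from the edge 2 -> 0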
| 68 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
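
# Optional reproducibility note (added; not in the original snippet): pass a
# seeded generator so repeated runs produce the same image, e.g.
#   generator = torch.Generator('cuda').manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]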
| 183 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    '''simple docstring'''
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
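

# Illustrative note (added): _dump_articles(Path('val.source'), ['a', 'b'])
# writes the newline-joined text 'a\nb' to val.source.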
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        '.split()

        with patch.object(sys, 'argv', testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()

        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])

        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()

        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if 'translation' in task:
            expected_strings.append('bleu')
        else:
            expected_strings.extend(ROUGE_KEYS)

        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
| 220 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_lowercase = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_KWARGS_DESCRIPTION = _lowercase  # the long Args/Returns/Examples docstring assigned above
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {'exact_match': np.mean(score_list) * 1_00}
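

# Minimal usage sketch (added; mirrors the docstring examples above):
#   import datasets
#   exact_match = datasets.load_metric('exact_match')
#   exact_match.compute(references=['the cat'], predictions=['the cat'])
#   # {'exact_match': 100.0}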
| 363 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
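    # Small worked example (added for illustration): simple interest on a
    # principal of 1000 at 0.05% per day over 30 days.
    print(simple_interest(1000, 0.0005, 30))  # -> 15.0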
| 86 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_small_integration_test(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
| 25 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 131072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]['url']
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UP_NUM_TO_LAYER = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
MID_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
DEPTH_0_TO_LAYER = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
RES_CONV_MAP = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
ATTN_MAP = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def convert_resconv_naming(name):
    if name.startswith('skip'):
        return name.replace('skip', RES_CONV_MAP['skip'])

    # name has to be of format main.{digit}
    if not name.startswith('main.'):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=1_3):
    string = input_string
    if string.split('.')[0] == 'timestep_embed':
        return string.replace('timestep_embed', 'time_proj')
    depth = 0
    if string.startswith('net.3.'):
        depth += 1
        string = string[6:]
    elif string.startswith('net.'):
        string = string[4:]

    while string.startswith('main.7.'):
        depth += 1
        string = string[7:]

    if string.startswith('main.'):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else 'down_blocks.0'

    if not string_left.startswith('.'):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if 'resnets' in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif 'attentions' in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel'):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model_name = args.model_path.split('/')[-1].split('.')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]['sample_rate']
    sample_size = MODELS_MAP[model_name]['sample_size']

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['state_dict'])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith('kernel') for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 1_0_0
    seed = 3_3

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(3_3)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print('Diff sum', diff_sum)
    print('Diff max', diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
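
# Hypothetical invocation (added; the script filename is assumed and the
# output path is a placeholder, while the model name must be a MODELS_MAP key):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers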
| 113 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 113 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
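
# Illustrative note (added, not part of the original file): with this pattern
# importing the package is cheap -- _LazyModule only imports the torch-backed
# submodule when one of the listed names is first accessed, e.g.
#   from transformers.models.lilt import LiltModel   # triggers the real import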
| 148 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self):
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        """simple docstring"""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        """simple docstring"""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue):
        """simple docstring"""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue, time_slice):
        """simple docstring"""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self):
        """simple docstring"""
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f'waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f'completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f'turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'
    )
    # print sequence of finished processes
    print(
        f'sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}'
    )
| 148 | 1 |
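# A minimal sketch (not part of the sample above) tracing how much CPU time a
# single process receives at each level of the MLFQ; the slice values 17 and 25
# and the burst time 53 match process P1 in the __main__ example above.
def trace_levels(burst: int, slices: list[int]) -> list[int]:
    """Return the CPU time a process receives at each queue level."""
    used = []
    for time_slice in slices:
        t = min(burst, time_slice)
        used.append(t)
        burst -= t
    used.append(burst)  # the last (FCFS) level runs whatever remains
    return used


print(trace_levels(53, [17, 25]))  # [17, 25, 11]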
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are late three consecutive days or absent twice,
    # the attendance string is not a prize string
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
| 66 |
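# An equivalent formulation (a sketch, not part of the sample above) that
# replaces the module-level cache dict with functools.lru_cache:
from functools import lru_cache


@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)  # absent today
        + prize_strings(days - 1, absent, 0)  # on time today
    )


assert prize_strings(4) == 43  # the known count of prize strings for a 4-day period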
def solution() -> int:
    """Concatenate the positive integers into one long string and return the
    product of the digits at positions 1, 10, 100, ..., 1000000."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 66 | 1 |
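# The same digits can be found without building the million-character string;
# a sketch (not part of the sample above) using digit-counting arithmetic:
from math import prod


def champernowne_digit(n: int) -> int:
    """Return the n-th digit (1-indexed) of 0.123456789101112..."""
    digits, count, start = 1, 9, 1
    # skip over the blocks of 1-digit, 2-digit, ... numbers
    while n > digits * count:
        n -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])


print(prod(champernowne_digit(10**k) for k in range(7)))  # 1*1*5*3*7*2*1 = 210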
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 70 |
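# A small sketch (assuming `transformers` is installed) of the
# `update_from_string` behaviour exercised above: the string is parsed as
# comma-separated key=value pairs and each value is coerced to the type of
# the existing attribute.
from transformers import GPT2Config

c = GPT2Config()
c.update_from_string("n_embd=1024,resid_pdrop=0.2,scale_attn_weights=False,summary_type=foo")
assert c.n_embd == 1024 and c.resid_pdrop == 0.2
assert c.scale_attn_weights is False and c.summary_type == "foo"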
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Given A^(-1) (self), compute (A + uv^T)^(-1) via the Sherman-Morrison formula."""
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 282 | 0 |
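# A numerical check of the identity implemented by `sherman_morrison` above
# (a sketch assuming numpy is available; `u` and `v` mirror the test values):
import numpy as np

a = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
ainv = np.linalg.inv(a)
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
sm = ainv - (ainv @ u @ v.T @ ainv) / (1.0 + (v.T @ ainv @ u)[0, 0])
assert np.allclose(sm, np.linalg.inv(a + u @ v.T))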
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 283 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__A , __A , __A =False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value):
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-bytes, we don't have to do "read file, make bytes" (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value, token_per_repo_id=None):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage):
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage):
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 283 | 1 |
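# A small sketch (assuming `soundfile` and `numpy` are installed) of the round
# trip the feature above performs: array -> WAV bytes -> array.
from io import BytesIO

import numpy as np
import soundfile as sf

sr = 16000
tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)
buf = BytesIO()
sf.write(buf, tone, sr, format="wav")  # roughly what encode_example does
buf.seek(0)
decoded, decoded_sr = sf.read(buf)  # roughly what decode_example does
assert decoded_sr == sr and decoded.shape == tone.shape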
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 208 |
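# A quick sketch (assuming `datasets` is installed) of the parquet round trip
# the tests above exercise, without the pytest fixtures:
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
ds.to_parquet("demo.parquet")
reloaded = Dataset.from_parquet("demo.parquet")
assert reloaded.column_names == ds.column_names and reloaded.num_rows == ds.num_rows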
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax_tiny(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
| 329 | 0 |
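# A minimal sketch of the replicate/shard data-parallel pattern used throughout
# the tests above (assuming `jax` and `flax` are installed):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
params = {"w": jnp.full((4,), 2.0)}
batch = jnp.arange(n * 2.0).reshape(n * 2, 1)  # global batch, divisible by n

p_params = replicate(params)  # adds a leading device axis of size n
sharded = shard(batch)  # reshapes to (n, per_device_batch, ...)
out = jax.pmap(lambda p, x: x * p["w"][0])(p_params, sharded)
assert out.shape == sharded.shape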
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""" , [SplitInfo(), SplitInfo(dataset_name=UpperCAmelCase__ ), SplitInfo(dataset_name="""my_dataset""" )] )
def lowerCamelCase ( UpperCAmelCase__ : str ) -> List[Any]:
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
lowercase_ : Optional[int] = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 21 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 21 | 1 |
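# ImageGPT's image processor quantizes each (normalized) pixel to the id of its
# nearest colour cluster; a toy sketch of that nearest-neighbour step, reusing
# the two clusters from the tester above:
import numpy as np

clusters = np.array(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)
pixels = np.array([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])
distances = np.linalg.norm(pixels[:, None, :] - clusters[None, :, :], axis=-1)
ids = distances.argmin(axis=1)
assert ids.tolist() == [0, 1]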
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            # split the fused qkv projection into separate query/key/value tensors
            # (ViT-style module path inside the converted model)
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
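

# The qkv branch above assumes the original checkpoint stores the fused projection as a
# single (3 * hidden_size, hidden_size) matrix with query/key/value stacked row-wise.
# A standalone illustration of the slicing (toy sizes, no checkpoint involved):
#
#   import torch
#   hidden = 4
#   fused = torch.randn(3 * hidden, hidden)
#   q, k, v = fused[:hidden, :], fused[hidden : hidden * 2, :], fused[-hidden:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), fused)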
def remove_keys( state_dict ):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands" , "v0.02" , split="validation" )
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        waveform, _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16_000 , return_tensors="pt" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ):
        raise ValueError("Logits don't match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(f"MIT/{model_name}" )
        feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
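    # Typical invocation of this script (the script filename and output directory are
    # placeholders for illustration):
    #
    #   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path /tmp/ast \
    #       --push_to_hub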
| 63 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self ) -> int:
        return functools.reduce(operator.mul , self.conv_stride , 1 )
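

# With the default conv_stride above, the feature encoder downsamples raw audio by
# 5 * 2**6 == 320, i.e. one encoder frame per 320 input samples (20 ms at 16 kHz).
# Standalone check using only the defaults written in this file:
#
#   config = WavLMConfig()
#   assert config.inputs_to_logits_ratio == 320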
| 5 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
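

# to_atuple normalizes scalar-or-pair hyperparameters such as image and patch sizes:
#
#   assert to_atuple(224) == (224, 224)          # a bare int is duplicated
#   assert to_atuple((224, 196)) == (224, 196)   # an iterable is passed through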
@require_flax
class _lowercase :
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Optional[int] , snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
    def assert_almost_equals(self , a , b , tol ):
        """simple docstring"""
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , f"Difference between torch and flax is {diff} (>= {tol})." )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Tuple , snake_case : Any , snake_case : Tuple , snake_case : Dict=None , **snake_case : int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case , snake_case )
UpperCamelCase_ : int = FlaxVisionTextDualEncoderModel(snake_case )
UpperCamelCase_ : str = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : str , snake_case : Dict , snake_case : List[str] , snake_case : int , snake_case : Optional[int]=None , **snake_case : str ) -> Tuple:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Any = self.get_vision_text_model(snake_case , snake_case )
UpperCamelCase_ : List[Any] = {'vision_model': vision_model, 'text_model': text_model}
UpperCamelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case )
UpperCamelCase_ : Dict = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[Any] , snake_case : Tuple , snake_case : Dict , snake_case : Optional[Any] , snake_case : Any=None , **snake_case : List[str] ) -> Any:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Optional[Any] = self.get_vision_text_model(snake_case , snake_case )
UpperCamelCase_ : int = {'vision_model': vision_model, 'text_model': text_model}
UpperCamelCase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case )
UpperCamelCase_ : Union[str, Any] = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
UpperCamelCase_ : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case )
UpperCamelCase_ : Optional[int] = model(input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case )
UpperCamelCase_ : List[Any] = after_output[0]
UpperCamelCase_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : int , snake_case : List[str] , snake_case : Any , snake_case : str , snake_case : List[str]=None , **snake_case : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self.get_vision_text_model(snake_case , snake_case )
UpperCamelCase_ : int = {'vision_model': vision_model, 'text_model': text_model}
UpperCamelCase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case )
UpperCamelCase_ : str = model(
input_ids=snake_case , pixel_values=snake_case , attention_mask=snake_case , output_attentions=snake_case )
UpperCamelCase_ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(snake_case ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ : Dict = to_atuple(vision_model.config.image_size )
UpperCamelCase_ : str = to_atuple(vision_model.config.patch_size )
UpperCamelCase_ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCamelCase_ : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCamelCase_ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(snake_case ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
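    # The attention shape check above relies on the standard ViT patch count: e.g. for a
    # 224x224 image with 16x16 patches, (224 // 16) ** 2 = 196 patches plus one [CLS]
    # token gives seq_len 197 (example numbers, not tied to the tiny test configs):
    #
    #   image_size, patch_size = (224, 224), (16, 16)
    #   num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    #   assert num_patches + 1 == 197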
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Dict , snake_case : Any , snake_case : Optional[int] ) -> str:
"""simple docstring"""
pt_model.to(snake_case )
pt_model.eval()
# prepare inputs
UpperCamelCase_ : Any = inputs_dict
UpperCamelCase_ : int = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCamelCase_ : str = pt_model(**snake_case ).to_tuple()
UpperCamelCase_ : Union[str, Any] = fx_model(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case )
UpperCamelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case , from_pt=snake_case )
UpperCamelCase_ : Union[str, Any] = fx_model_loaded(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case )
UpperCamelCase_ : Any = VisionTextDualEncoderModel.from_pretrained(snake_case , from_flax=snake_case )
pt_model_loaded.to(snake_case )
pt_model_loaded.eval()
with torch.no_grad():
UpperCamelCase_ : Optional[int] = pt_model_loaded(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case , pt_output_loaded.numpy() , 4e-2 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Optional[int] , snake_case : str , snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case , snake_case )
UpperCamelCase_ : Any = VisionTextDualEncoderModel(snake_case )
UpperCamelCase_ : Optional[Any] = FlaxVisionTextDualEncoderModel(snake_case )
UpperCamelCase_ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case )
UpperCamelCase_ : Optional[int] = fx_state
self.check_pt_flax_equivalence(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case , snake_case )
UpperCamelCase_ : str = VisionTextDualEncoderModel(snake_case )
UpperCamelCase_ : Tuple = FlaxVisionTextDualEncoderModel(snake_case )
UpperCamelCase_ : str = load_flax_weights_in_pytorch_model(snake_case , fx_model.params )
self.check_pt_flax_equivalence(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.prepare_config_and_inputs()
self.check_save_load(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase_ : List[str] = config_inputs_dict.pop('vision_config' )
UpperCamelCase_ : Union[str, Any] = config_inputs_dict.pop('text_config' )
UpperCamelCase_ : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case , snake_case , snake_case )
self.check_equivalence_flax_to_pt(snake_case , snake_case , snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Tuple = self.get_pretrained_model_and_inputs()
UpperCamelCase_ : Optional[Any] = model_a(**snake_case )
UpperCamelCase_ : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case )
UpperCamelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case )
UpperCamelCase_ : Any = model_a(**snake_case )
UpperCamelCase_ : Tuple = after_outputs[0]
UpperCamelCase_ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case , 1e-5 )
@require_flax
class _lowercase ( snake_case_ , unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=snake_case , text_from_pt=snake_case , )
UpperCamelCase_ : int = 1_3
UpperCamelCase_ : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase_ : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase_ : List[Any] = random_attention_mask([batch_size, 4] )
UpperCamelCase_ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Union[str, Any] , snake_case : int ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = FlaxViTModel(snake_case )
UpperCamelCase_ : List[str] = FlaxBertModel(snake_case )
return vision_model, text_model
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = FlaxViTModelTester(self )
UpperCamelCase_ : Dict = FlaxBertModelTester(self )
UpperCamelCase_ : int = vit_model_tester.prepare_config_and_inputs()
UpperCamelCase_ : Optional[int] = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_ : str = vision_config_and_inputs
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowercase ( snake_case_ , unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=snake_case , text_from_pt=snake_case , )
UpperCamelCase_ : List[str] = 1_3
UpperCamelCase_ : Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase_ : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase_ : Tuple = random_attention_mask([batch_size, 4] )
UpperCamelCase_ : Any = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Optional[int] , snake_case : int ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Any = FlaxCLIPVisionModel(snake_case )
UpperCamelCase_ : List[Any] = FlaxBertModel(snake_case )
return vision_model, text_model
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = FlaxCLIPVisionModelTester(self )
UpperCamelCase_ : int = FlaxBertModelTester(self )
UpperCamelCase_ : int = clip_model_tester.prepare_config_and_inputs()
UpperCamelCase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_ : Optional[int] = vision_config_and_inputs
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowercase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
UpperCamelCase_ : str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
UpperCamelCase_ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase_ : Optional[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=snake_case , padding=snake_case , return_tensors='np' )
UpperCamelCase_ : Any = model(**snake_case )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCamelCase_ : List[str] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , snake_case , atol=1e-3 ) )
| 50 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
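        # Worked example with the defaults above: image_size=30, patch_size=2 gives
        # (30 // 2) ** 2 = 225 patches; with mask_ratio=0.6 the encoder keeps
        # ceil(0.4 * (225 + 1)) = 91 tokens (the +1 is the [CLS] token):
        #
        #   import math
        #   assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91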
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : List[str] = None
if self.use_labels:
UpperCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Dict , snake_case : List[str] , snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = ViTMAEModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : Tuple = model(snake_case )
UpperCamelCase_ : Tuple = (self.image_size // self.patch_size) ** 2
UpperCamelCase_ : Tuple = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase_ : Optional[int] = 1
UpperCamelCase_ : Dict = ViTMAEForPreTraining(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ : Tuple = model(snake_case )
UpperCamelCase_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ : int = config_and_inputs
UpperCamelCase_ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Any = ViTMAEModelTester(self )
UpperCamelCase_ : Tuple = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Any = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[str] = model_class(snake_case )
UpperCamelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Union[str, Any] = [*signature.parameters.keys()]
UpperCamelCase_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[str] , snake_case : Optional[int] , snake_case : Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : Optional[int] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCamelCase_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase_ : int = torch.from_numpy(snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase_ : Tuple = pt_noise
super().check_pt_tf_models(snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : str = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCamelCase_ : Any = outputs[0].cpu().numpy()
UpperCamelCase_ : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
UpperCamelCase_ : Union[str, Any] = model_class.from_pretrained(snake_case )
model.to(snake_case )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCamelCase_ : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
# Make sure we don't have nans
UpperCamelCase_ : Optional[Any] = after_outputs[0].cpu().numpy()
UpperCamelCase_ : Union[str, Any] = 0
UpperCamelCase_ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : Dict = ViTMAEModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase_ : List[str] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(snake_case )
UpperCamelCase_ : Tuple = self.default_image_processor
UpperCamelCase_ : Union[str, Any] = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase_ : int = ViTMAEConfig()
UpperCamelCase_ : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase_ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCamelCase_ : str = model(**snake_case , noise=torch.from_numpy(snake_case ).to(device=snake_case ) )
# verify the logits
UpperCamelCase_ : Dict = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCamelCase_ : Union[str, Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(snake_case ) , atol=1e-4 ) )
| 50 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
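

# CANINE's vocabulary is just Unicode: a regular character's id is ord(char), and the
# special symbols live in the Private Use Area, as the tables above encode:
#
#   assert ord("h") == 104
#   assert SPECIAL_CODEPOINTS_BY_NAME["[CLS]"] == 0xE000
#   assert SPECIAL_CODEPOINTS[0xE001] == "[SEP]"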
class CanineTokenizer(PreTrainedTokenizer ):
    """simple docstring"""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS ),
        eos_token=chr(SEP ),
        sep_token=chr(SEP ),
        cls_token=chr(CLS ),
        pad_token=chr(PAD ),
        mask_token=chr(MASK ),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size(self ) -> int:
        '''simple docstring'''
        return self._unicode_vocab_size

    def _tokenize(self , text ) -> List[str]:
        '''simple docstring'''
        return list(text )

    def _convert_token_to_id(self , token ) -> int:
        '''simple docstring'''
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )

    def _convert_id_to_token(self , index ) -> str:
        '''simple docstring'''
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )

    def convert_tokens_to_string(self , tokens ) -> str:
        '''simple docstring'''
        return "".join(tokens )

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        return ()
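

# End-to-end sketch (assumes the "nielsr/canine-s" checkpoint referenced above is
# reachable; the expected ids follow directly from ord() plus the special codepoints):
#
#   tokenizer = CanineTokenizer.from_pretrained("nielsr/canine-s")
#   ids = tokenizer("hello").input_ids
#   assert ids == [0xE000, 104, 101, 108, 108, 111, 0xE001]  # [CLS] h e l l o [SEP]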
| 68 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
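# The fixture stores FILE_CONTENT zstd-compressed; cached_path() below is expected to
# decompress it transparently. Round-trip sketch using the same zstd API (temp file only):
#
#   import zstandard as zstd
#   def roundtrip(tmp_path):
#       p = tmp_path / "sample.zstd"
#       with zstd.open(p, "wb") as f:
#           f.write(b"Text data.")
#       with zstd.open(p, "rb") as f:
#           assert f.read() == b"Text data."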
@pytest.fixture
def tmpfs_file(tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file ):
    output_path = get_from_cache(f"tmp://{tmpfs_file}" )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 68 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
    test_gpu_offload = False
@property
    def text_embedder_hidden_size(self ):
return 32
@property
    def time_input_dim(self ):
return 32
@property
    def time_embed_dim(self ):
return self.time_input_dim * 4
@property
    def renderer_dim(self ):
return 8
@property
    def dummy_tokenizer(self ):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
@property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior(self ):
        torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer(self ):
        torch.manual_seed(0 )
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components(self ):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
    def test_shap_e(self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            'a shark', generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
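# Mirrors the slow test above as a snippet (assumes a GPU and access to the
# "openai/shap-e" weights; all values are the test's own):
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   images = pipe("a shark", generator=generator, guidance_scale=15.0,
#                 num_inference_steps=64, frame_size=64, output_type="np").images[0]
#   assert images.shape == (20, 64, 64, 3)  # 20 rendered views of the 3D asset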
| 103 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    mask_patch_size: int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
    mask_ratio: float = field(
        default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __post_init__(self ):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)} , )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    image_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        } , )
    patch_size: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."} , )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor where the value is either 0 or 1, and 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6) -> None:
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self) -> torch.Tensor:
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
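# Editor's sketch (hedged, not part of the original script): with the defaults above,
# rand_size = 192 // 32 = 6, scale = 32 // 4 = 8, token_count = 36, and the flattened
# mask covers (192 // 4)**2 = 2304 model patches. A quick self-check:
#
#   gen = MaskGenerator()
#   mask = gen()
#   assert mask.shape == (2304,)
#   assert int(mask.sum()) == gen.mask_count * gen.scale**2  # each masked cell covers scale**2 patches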
def collate_fn(examples):
    """Collate examples into a batch: stack pixel values and boolean masked positions."""
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mim', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, 'decoder_type'):
        config.decoder_type = 'simmim'

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            'image_size': model_args.image_size,
            'patch_size': model_args.patch_size,
            'encoder_stride': model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ] )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating which patches to mask."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'masked-image-modeling',
        'dataset': data_args.dataset_name,
        'tags': ['masked-image-modeling'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
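# Editor's sketch (hedged): a minimal invocation, assuming the defaults above
# (dataset_name="cifar10") and a model trained from scratch; flag names mirror the
# dataclass fields, and --model_type must be one of MODEL_TYPES (e.g. "swin" if present):
#
#   python run_mim.py --model_type swin --output_dir ./simmim-out --do_train --do_eval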
| 103 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) = 0
    return -plogp.sum(dim=-1)
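# Editor's sanity check (hedged sketch, not in the original): for a uniform
# distribution over n outcomes the entropy should equal log(n):
#
#   p = torch.full((4,), 0.25)
#   assert torch.isclose(entropy(p), torch.log(torch.tensor(4.0)))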
def print_2d_tensor(tensor):
    """Print a 2D tensor, one tab-separated row per layer."""
    logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute: head attention entropy and head importance scores according to
    http://arxiv.org/abs/1905.10650"""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_2d_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
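# Editor's note (hedged): the importance score above follows Michel et al. (2019):
# with a mask variable multiplying each head's output, a head's importance is
# approximated by |dL/d(mask)| accumulated over the evaluation data, i.e. how
# sensitive the LM loss is to switching that head off. A minimal standalone call:
#
#   attn_entropy, head_importance, loss = compute_heads_importance(args, model, eval_dataloader)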
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask head (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)"""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info('Final head mask')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune head (remove heads weights) based on the head importance scores
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)"""
    # Try pruning and test time speedup: pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.', )
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.', )
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name_or_path', )
    parser.add_argument(
        '--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name_or_path', )
    parser.add_argument(
        '--cache_dir', default=None, type=str, help='Where do you want to store the pre-trained models downloaded from s3', )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help="Don't normalize importance score by layers")
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true', help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.')
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float, help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).', )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount to heads to masking at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length', default=128, type=int, help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ), )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
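# Editor's sketch (hedged): a typical invocation, assuming `data.txt` holds one
# token id per line (the script loads it with np.loadtxt):
#
#   python run_prune_gpt.py --model_name_or_path gpt2 \
#       --data_dir data.txt --output_dir ./pruned --try_masking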
| 275 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 86 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: maximize total value of items with values vl and weights wt
    under capacity w, choosing among the first n items (fractions allowed)."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
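# Editor's worked example (hedged sketch, not an original doctest): items with
# values [60, 100, 120] and weights [10, 20, 30] at capacity 50 are sorted by
# value/weight ratio; the first two fit whole (160) and 20/30 of the third is
# taken, giving 160 + 20 * 120 / 30 = 240.0:
#
#   assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0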
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for * operator (scalar multiplication and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)), )


if __name__ == "__main__":
    unittest.main()
| 204 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ('foo.json',)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained('gpt2')
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            'max_new_tokens': 1024,
            'foo': 'bar',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {'foo': 'bar'})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'

        with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, 'bar')

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, 'foo')  # no new kwargs should be initialized if from config

    def test_default_values(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-generation-config')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-generation-config-org')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub('test-generation-config', use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-generation-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='test-generation-config', push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub('valid_org/test-generation-config-org', use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-generation-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-generation-config-org', push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 216 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """If the DATASETS_VERBOSITY env var is set to a valid choice, return it; otherwise fall back to `_default_log_level`."""
    env_level_str = os.getenv('DATASETS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }" )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split('.')[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
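# Editor's usage sketch (hedged; module path assumed from the `datasets` library layout):
#
#   from datasets.utils import logging as ds_logging
#   ds_logging.set_verbosity_info()          # or: DATASETS_VERBOSITY=info in the environment
#   logger = ds_logging.get_logger(__name__)
#   ds_logging.disable_progress_bar()        # silence tqdm bars library-wide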
| 216 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
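# How the lazy pattern above behaves (minimal self-contained sketch of the
# idea; the real _LazyModule in transformers.utils is more involved):
#
#     import importlib, types
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_submodule = {
#                 attr: sub for sub, attrs in import_structure.items() for attr in attrs
#             }
#         def __getattr__(self, attr):  # resolved on first access only
#             sub = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
#             return getattr(sub, attr)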
| 349 |
'''simple docstring'''
__version__ = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
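# Typical entry point built on the exports above (sketch; `model`, `optimizer`
# and `dataloader` are user objects, not defined here):
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         loss = model(**batch).loss
#         accelerator.backward(loss)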
| 349 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__a = object()
# For specifying empty leaf dict `{}`
__a = object()
def _match( qs, ks ):
    """Return True if the regexes in qs match any window of strings in tuple ks."""
    qts = tuple((re.compile(x + """$""" ) for x in qs) )
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts, ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    """Build a replace(key, val) closure over an ordered list of (rule, replacement) pairs."""
    def replace(key, val ):
        for rule, replacement in rules:
            if _match(rule, key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    """Partition rules for a GPT-2-style decoder; P sharding uses the "mp" (model-parallel) axis."""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("""mp""", None )),
        (("transformer", "wte", "embedding"), P("""mp""", None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, """mp""" )),
        (("attention", "out_proj", "kernel"), P("""mp""", None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, """mp""" )),
        (("mlp", "c_fc", "bias"), P("""mp""" )),
        (("mlp", "c_proj", "kernel"), P("""mp""", None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions( in_dict ):
    """Map every parameter leaf in in_dict to a PartitionSpec via the rules above."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k, v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
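if __name__ == "__main__":
    # Usage sketch on a hypothetical two-parameter tree: every leaf matched by
    # the rules receives a PartitionSpec (or None for replicated parameters).
    import numpy as np
    params = {
        "transformer": {"wte": {"embedding": np.zeros((100, 8) )}},
        "ln_f": {"bias": np.zeros((8,) ), "scale": np.ones((8,) )},
    }
    print(set_partitions(params ) )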
| 66 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name, num_frames ):
    '''simple docstring'''
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("""patch""" )
    patch_size = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames )
    if "large" in model_name:
        # attribute targets restored from the upstream conversion script
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config )
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key( name ):
    '''simple docstring'''
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
    if name == "positional_embedding":
        name = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""", """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""", """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""", """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""", """fc2""" )
    if name.startswith("""transformer.resblocks""" ):
        name = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""", """text_model.final_layer_norm""" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
    if name == "visual.positional_embedding":
        name = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
    if name.startswith("""visual.transformer.resblocks""" ):
        name = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
    if "visual.conv1" in name:
        name = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
    if "visual.ln_pre" in name:
        name = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
    if "visual.ln_post" in name:
        name = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
    if "visual.proj" in name:
        name = name.replace("""visual.proj""", """visual_projection.weight""" )
    if "text_projection" in name:
        name = name.replace("""text_projection""", """text_projection.weight""" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
    if "prompts_visual_ln" in name:
        name = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("""positional""", """position""" )
    if name.startswith("""mit.resblocks""" ):
        name = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
    # prompts generator
    if name.startswith("""prompts_generator.norm""" ):
        name = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
    return name
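# Illustration of the mapping above: OpenAI-CLIP-style checkpoint keys are
# rewritten onto the HF module hierarchy, e.g.
#   rename_key("transformer.resblocks.0.ln_1.weight")
#   == "text_model.encoder.layers.0.layer_norm1.weight"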
def convert_state_dict( orig_state_dict, config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "attn.in_proj" in key:
snake_case_ :Optional[Any] = key.split(""".""" )
if key.startswith("""visual""" ):
snake_case_ :Any = key_split[3]
snake_case_ :Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ :Tuple = key_split[2]
snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
snake_case_ :Tuple = key_split[2]
snake_case_ :Any = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
return orig_state_dict
def prepare_video( num_frames ):
    '''simple docstring'''
    if num_frames == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif num_frames == 16:
        filename = """eating_spaghetti.npy"""
    elif num_frames == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""", filename=filename, repo_type="""dataset""", )
    video = np.load(file )
    return list(video )
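# e.g. prepare_video(8) downloads the demo clip once and returns a list of
# eight H x W x 3 frames ready for the video processor used below.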
def convert_xclip_checkpoint( model_name, pytorch_dump_folder_path=None, push_to_hub=False ):
    '''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = """pytorch_model.bin"""
        gdown.cached_download(checkpoint_url, output, quiet=False )
        state_dict = torch.load(output, map_location="""cpu""" )["""model"""]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )["""model"""]
    state_dict = convert_state_dict(state_dict, config )
    model = XCLIPModel(config )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
    image_processor = VideoMAEImageProcessor(size=size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=video, return_tensors="""pt""", padding=True )
    print("""Shape of pixel values:""", inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("""Probs:""", probs )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model, processor and slow tokenizer files to the hub...""" )
        model.push_to_hub(model_name, organization="""nielsr""" )
        processor.push_to_hub(model_name, organization="""nielsr""" )
        slow_tokenizer.push_to_hub(model_name, organization="""nielsr""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
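# Example invocation (hypothetical script filename, local output path):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32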
| 66 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self ) -> Any:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ) -> Tuple:
        """simple docstring"""
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self ) -> Any:
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self , result ) -> List[str]:
        """simple docstring"""
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        """simple docstring"""
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        """simple docstring"""
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Union[str, Any]:
        """simple docstring"""
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self ) -> Optional[Any]:
        """simple docstring"""
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config(self ) -> Any:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_deberta_model(self ) -> int:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification(self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm(self ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification(self ) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ) -> Optional[int]:
        """simple docstring"""
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
@slow
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
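# To run these tests in isolation (sketch; the path follows the usual
# transformers test layout):
#   python -m pytest tests/models/deberta/test_modeling_deberta.py -v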
| 353 |
"""simple docstring"""
class SubArray:
    '''simple docstring'''
    def __init__( self , arr ) -> Any:
        """simple docstring"""
        # parse the comma-separated input into a list of number strings
        self.array = arr.split(""",""" )
    def solve_sub_array( self ) -> Any:
        """simple docstring"""
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
a :Optional[Any] = input("please input some numbers:")
a :Optional[Any] = SubArray(whole_array)
a :Optional[int] = array.solve_sub_array()
print(("the results is:", re))
| 56 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Dict = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig( PretrainedConfig ):
    model_type = '''git_vision_model'''
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    model_type = '''git'''
    def __init__( self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
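# Minimal sanity check (sketch): a default GitConfig builds its own vision
# sub-config, and to_dict() above expands it back into a plain mapping.
if __name__ == "__main__":
    config = GitConfig()
    assert isinstance(config.to_dict()["vision_config"], dict)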
| 93 |
"""simple docstring"""
def solution(limit = 100_0000 ):
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve."""
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime: phi[i] was never reduced
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
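# Sanity check on a small bound: phi(2..8) = 1+2+2+4+2+6+4 = 21,
# so solution(8) returns 21 (same sieve, just a smaller limit).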
| 301 | 0 |
'''simple docstring'''
def solution( n = 4000000 ):
    """Return the sum of the even-valued Fibonacci terms not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f"{solution() = }")
| 367 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class UNetaDModel(ModelMixin ,ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__(self : Union[str, Any] , sample_size : int = 65_536 , sample_rate : Optional[int] = None , in_channels : int = 2 , out_channels : int = 2 , extra_in_channels : int = 0 , time_embedding_type : str = "fourier" , flip_sin_to_cos : bool = True , use_timestep_embedding : bool = False , freq_shift : float = 0.0 , down_block_types : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type : str = "UNetMidBlock1D" , out_block_type : str = None , block_out_channels : Tuple[int] = (32, 32, 64) , act_fn : str = None , norm_num_groups : int = 8 , layers_per_block : int = 1 , downsample_each_block : bool = False , ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : str = sample_size
# time
if time_embedding_type == "fourier":
__UpperCAmelCase : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_ )
__UpperCAmelCase : str = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__UpperCAmelCase : str = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_ )
__UpperCAmelCase : Dict = block_out_channels[0]
if use_timestep_embedding:
__UpperCAmelCase : Union[str, Any] = block_out_channels[0] * 4
__UpperCAmelCase : str = TimestepEmbedding(
in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , )
__UpperCAmelCase : Tuple = nn.ModuleList([] )
__UpperCAmelCase : int = None
__UpperCAmelCase : Optional[Any] = nn.ModuleList([] )
__UpperCAmelCase : Dict = None
# down
__UpperCAmelCase : str = in_channels
for i, down_block_type in enumerate(UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = output_channel
__UpperCAmelCase : Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__UpperCAmelCase : Tuple = i == len(UpperCAmelCase_ ) - 1
__UpperCAmelCase : List[str] = get_down_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase_ )
# mid
__UpperCAmelCase : Optional[Any] = get_mid_block(
UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , )
# up
__UpperCAmelCase : Tuple = list(reversed(UpperCAmelCase_ ) )
__UpperCAmelCase : Any = reversed_block_out_channels[0]
if out_block_type is None:
__UpperCAmelCase : Union[str, Any] = out_channels
else:
__UpperCAmelCase : Dict = block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_ ):
__UpperCAmelCase : int = output_channel
__UpperCAmelCase : str = (
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_ ) - 1 else final_upsample_channels
)
__UpperCAmelCase : Tuple = i == len(UpperCAmelCase_ ) - 1
__UpperCAmelCase : Dict = get_up_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = output_channel
# out
__UpperCAmelCase : Optional[int] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__UpperCAmelCase : List[Any] = get_out_block(
out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self : Optional[int] , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , return_dict : bool = True , ):
"""simple docstring"""
__UpperCAmelCase : Dict = timestep
if not torch.is_tensor(UpperCAmelCase_ ):
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(sample.device )
__UpperCAmelCase : List[str] = self.time_proj(UpperCAmelCase_ )
if self.config.use_timestep_embedding:
__UpperCAmelCase : Any = self.time_mlp(UpperCAmelCase_ )
else:
__UpperCAmelCase : Any = timestep_embed[..., None]
__UpperCAmelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__UpperCAmelCase : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__UpperCAmelCase : int = ()
for downsample_block in self.down_blocks:
__UpperCAmelCase , __UpperCAmelCase : int = downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__UpperCAmelCase : List[str] = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__UpperCAmelCase : Any = down_block_res_samples[-1:]
__UpperCAmelCase : List[Any] = down_block_res_samples[:-1]
__UpperCAmelCase : str = upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_ )
# 5. post-process
if self.out_block:
__UpperCAmelCase : Tuple = self.out_block(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCAmelCase_ )
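# Intended usage (illustrative sketch, names as restored above):
#   net = UNetaDModel(block_out_channels=(8, 8, 16), sample_size=64)
#   noise_pred = net(torch.randn(1, 2, 64), timestep=10).sample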
| 37 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig( PretrainedConfig ):
    model_type = """xlm-roberta-xl"""
    def __init__( self, vocab_size=25_08_80, hidden_size=25_60, num_hidden_layers=36, num_attention_heads=32, intermediate_size=1_02_40, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_14, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ) -> Optional[int]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig( OnnxConfig ):
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
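# Quick check (sketch; the restored class names above are best guesses):
#   cfg = XLMRobertaXLConfig()
#   onnx_cfg = XLMRobertaXLOnnxConfig(cfg)
#   print(onnx_cfg.inputs)  # input_ids / attention_mask with dynamic axes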
| 21 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp( t , v0 , v1 , DOT_THRESHOLD=0.99_95 ):
    """Spherical linear interpolation between two vectors/tensors."""
    inputs_are_torch = False  # initialize so numpy inputs don't hit a NameError below
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
def spherical_dist_loss( x , y ):
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad( model , value ):
    for param in model.parameters():
        param.requires_grad = value
# class name follows the community pipeline this file mirrors (best guess)
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
    def __init__( self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None, ) -> Tuple:
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size['shortest_edge']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def UpperCamelCase ( self, lowerCamelCase = "auto") -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae, lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
set_requires_grad(self.unet, lowerCamelCase)
    def get_timesteps( self, num_inference_steps, strength, device) -> Optional[int]:
        """simple docstring"""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self, image, timestep, batch_size, dtype, device, generator=None) -> Optional[Any]:
        """simple docstring"""
        if not isinstance(image, torch.Tensor):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(image)}''')
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.1_8_2_1_5 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description( self, image) -> Optional[int]:
        """simple docstring"""
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
    def get_clip_image_embeddings( self, image, batch_size) -> List[str]:
        """simple docstring"""
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ) -> List[str]:
        """simple docstring"""
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler)} not supported''')
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.1_8_2_1_5 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__( self, style_image, content_image, style_prompt = None, content_prompt = None, height = 5_12, width = 5_12, noise_strength = 0.6, num_inference_steps = 50, guidance_scale = 7.5, batch_size = 1, eta = 0.0, clip_guidance_scale = 1_00, generator = None, output_type = "pil", return_dict = True, slerp_latent_style_strength = 0.8, slerp_prompt_style_strength = 0.1, slerp_clip_image_style_strength = 0.1, ) -> int:
"""simple docstring"""
if isinstance(lowerCamelCase, lowerCamelCase) and len(lowerCamelCase) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowerCamelCase)} generators.''')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')
if isinstance(lowerCamelCase, torch.Generator) and batch_size > 1:
_lowercase : Dict = [generator] + [None] * (batch_size - 1)
_lowercase : Optional[int] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
_lowercase : str = ', '.join(lowerCamelCase)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : List[Any] = self.get_image_description(lowerCamelCase)
if style_prompt is None:
if len(lowerCamelCase):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''')
_lowercase : Dict = self.get_image_description(lowerCamelCase)
# get prompt text embeddings for content and style
_lowercase : Optional[int] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowercase : Union[str, Any] = self.tokenizer(
lowerCamelCase, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='pt', )
_lowercase : List[Any] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowercase : Any = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# duplicate text embeddings for each generation per prompt
_lowercase : Dict = text_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# set timesteps
_lowercase : Dict = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : Any = 1
self.scheduler.set_timesteps(lowerCamelCase, **lowerCamelCase)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
_lowercase , _lowercase : List[Any] = self.get_timesteps(lowerCamelCase, lowerCamelCase, self.device)
_lowercase : str = timesteps[:1].repeat(lowerCamelCase)
# Preprocess image
_lowercase : str = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : int = preprocess(lowerCamelCase, lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = self.prepare_latents(
lowerCamelCase, lowerCamelCase, lowerCamelCase, text_embeddings.dtype, self.device, lowerCamelCase)
_lowercase : Optional[int] = slerp(lowerCamelCase, lowerCamelCase, lowerCamelCase)
if clip_guidance_scale > 0:
_lowercase : Optional[int] = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = self.get_clip_image_embeddings(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = slerp(
lowerCamelCase, lowerCamelCase, lowerCamelCase)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Tuple = content_text_input.input_ids.shape[-1]
_lowercase : Union[str, Any] = self.tokenizer([''], padding='max_length', max_length=lowerCamelCase, return_tensors='pt')
_lowercase : int = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Union[str, Any] = uncond_embeddings.repeat_interleave(lowerCamelCase, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : List[Any] = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='cpu', dtype=lowerCamelCase).to(
self.device)
else:
_lowercase : Any = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase)
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
_lowercase : Tuple = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
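        # Rule of thumb (editorial note, not from the source): eta = 0 yields the
        # deterministic DDIM update, while eta = 1 recovers DDPM-like stochastic sampling.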
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowercase : Optional[Any] = {}
if accepts_eta:
_lowercase : List[Any] = eta
# check if the scheduler accepts generator
        accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowercase : str = generator
with self.progress_bar(total=lowerCamelCase):
for i, t in enumerate(lowerCamelCase):
# expand the latents if we are doing classifier free guidance
_lowercase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowercase : List[Any] = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase)
# predict the noise residual
                noise_pred = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase).sample
# perform classifier free guidance
if do_classifier_free_guidance:
                    noise_pred_uncond , noise_pred_text = noise_pred.chunk(2)
_lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Tuple = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
                    noise_pred , latents = self.cond_fn(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, )
# compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not define self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
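        # A hedged alternative when newer VAE configs expose the attribute:
        #   latents = latents / getattr(self.vae.config, "scaling_factor", 0.18215)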
        image = self.vae.decode(lowerCamelCase).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowercase : List[Any] = self.numpy_to_pil(lowerCamelCase)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase)
| 21 | 1 |
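# The pipeline above blends content and style embeddings with `slerp`, which is not
# defined in this excerpt. A minimal sketch of spherical linear interpolation, assuming
# PyTorch tensors and a scalar weight `t` in [0, 1] (illustrative, not the author's
# implementation):
import torch

def slerp(t, v0, v1, dot_threshold=0.9995):
    # cosine of the angle between the two (flattened) vectors
    dot = torch.sum(v0 * v1) / (torch.norm(v0) * torch.norm(v1))
    if torch.abs(dot) > dot_threshold:
        # nearly colinear: plain linear interpolation is numerically safer
        return (1 - t) * v0 + t * v1
    theta = torch.acos(dot)  # angle between v0 and v1
    sin_theta = torch.sin(theta)
    return (torch.sin((1 - t) * theta) / sin_theta) * v0 + (torch.sin(t * theta) / sin_theta) * v1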
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 362 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys( config , is_semantic=False , has_lm_head=False ) -> Any:
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", 'beit.embeddings.cls_token'),
(f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
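# Editorial note on read_in_q_k_v below: the fused qkv weight popped from the checkpoint
# has shape (3 * hidden_size, hidden_size) and is sliced into the query, key and value
# projections in that order; BEiT stores explicit biases only for q and v, with the key
# bias fixed at zero.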
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ) -> int:
    for i in range(config.num_hidden_layers ):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
def rename_key( dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['model']
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
# load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
# Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    logits = outputs.logits
# verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 107 | 0 |
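# A hedged invocation example for the conversion script above (the script filename is
# assumed; the URL is the argparse default from the source):
#   python convert_dit_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base \
#     --push_to_hub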
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 50 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Optional[Any] = """▁"""
_UpperCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertGenerationTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def A_ ( self : List[Any] ) -> List[str]:
super().setUp()
lowerCamelCase__ : Dict = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[Any] ) -> Dict:
lowerCamelCase__ : List[str] = '<s>'
lowerCamelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : List[str] ) -> Optional[int]:
lowerCamelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(UpperCAmelCase ) , 1002 )
def A_ ( self : List[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = BertGenerationTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
lowerCamelCase__ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A_ ( self : Dict ) -> Tuple:
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A_ ( self : Optional[int] ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = 'Hello World!'
lowerCamelCase__ : Dict = [18536, 2260, 101]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def A_ ( self : Optional[Any] ) -> str:
lowerCamelCase__ : List[Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCamelCase__ : Any = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@require_torch
@slow
def A_ ( self : int ) -> Optional[Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase__ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase__ : int = ' '.join(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = self.big_tokenizer.encode_plus(UpperCAmelCase , return_tensors='pt' , return_token_type_ids=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase )
lowerCamelCase__ : Tuple = BertGenerationConfig()
lowerCamelCase__ : Optional[Any] = BertGenerationEncoder(UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase )
model(**UpperCAmelCase )
@slow
def A_ ( self : Optional[int] ) -> List[Any]:
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 50 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file ,eval_file ,test_file ,tokenizer ,label_column_id ,max_seq_length = None ,):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv" ,data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    labels = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(labels )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] ,truncation=True ,max_length=max_seq_length ,padding="max_length" ) ,batched=True ,)
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) ,truncation=True ,max_length=max_seq_length ,padding="max_length" ,) ,batched=True ,)
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train ,({k: tf.int32 for k in input_names}, tf.int64) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val ,({k: tf.int32 for k in input_names}, tf.int64) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test ,({k: tf.int32 for k in input_names}, tf.int64) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
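# Editorial note on the CSV layout expected by get_tfds above: each file needs one label
# column (selected by position via `label_column_id`) plus one or two text columns, e.g.
#   label,sentence
#   positive,"a great movie"
#   negative,"a terrible plot"
# (column names and values here are invented for illustration).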
logger = logging.getLogger(__name__)
@dataclass
class A__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(metadata={'help': 'Which column contains the label'} )
SCREAMING_SNAKE_CASE = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the training file'} )
SCREAMING_SNAKE_CASE = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the development file'} )
SCREAMING_SNAKE_CASE = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the test file'} )
SCREAMING_SNAKE_CASE = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A__ :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def main( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,)
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    train_dataset , eval_dataset , test_ds , labelaid = get_tfds(
        train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=tokenizer ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,)
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(__snake_case ) ,labelaid=__snake_case ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task="text-classification" ,cache_dir=model_args.cache_dir ,)
with training_args.strategy.scope():
__lowerCAmelCase : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_pt=bool(".bin" in model_args.model_name_or_path ) ,config=__snake_case ,cache_dir=model_args.cache_dir ,)
def compute_metrics(__snake_case ) -> Dict:
        preds = np.argmax(p.predictions ,axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
model=__snake_case ,args=__snake_case ,train_dataset=__snake_case ,eval_dataset=__snake_case ,compute_metrics=__snake_case ,)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
__lowerCAmelCase : List[str] = os.path.join(training_args.output_dir ,"eval_results.txt" )
with open(__snake_case ,"w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(__snake_case )
return results
if __name__ == "__main__":
    main()
| 58 |
"""simple docstring"""
from ....utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self , config , num_labels=None , modal_hidden_size=2048) -> None:
"""simple docstring"""
__lowerCAmelCase : Any = config.__dict__
__lowerCAmelCase : Dict = modal_hidden_size
if num_labels:
            __lowerCAmelCase : Optional[int] = num_labels
| 58 | 1 |
"""simple docstring"""
def check_bouncy( n ) -> bool:
    if not isinstance(n , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(n )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent = 99 ) -> int:
    if not 0 < percent < 1_00:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 291 |
"""simple docstring"""
def circle_sort( collection ) -> list:
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection , low , high ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left] , collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left] , collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
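# A quick editorial sanity check for the circle sort above (sample data invented):
#   circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]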
| 291 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[Any] = RobertaTokenizer
_lowercase : int = RobertaTokenizerFast
_lowercase : str = True
_lowercase : int = {'''cls_token''': '''<s>'''}
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowercase__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
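        # editorial note: "\u0120" renders as "Ġ", the byte-level BPE marker for a leading space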
lowercase__ = {'''unk_token''': '''<unk>'''}
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCamelCase_ ( self: Tuple , **UpperCamelCase_: Optional[Any] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: str , **UpperCamelCase_: Optional[int] ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = '''lower newer'''
lowercase__ = '''lower newer'''
return input_text, output_text
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ = '''lower newer'''
lowercase__ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase__ = tokenizer.tokenize(UpperCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokens + [tokenizer.unk_token]
lowercase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase_ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase_ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowercase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
lowercase__ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = '''Encode this sequence.'''
lowercase__ = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
lowercase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
lowercase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowercase__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing spaces after special tokens
lowercase__ = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )} ) # mask token has a left space
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
lowercase__ = '''Encode <mask> sequence'''
lowercase__ = '''Encode <mask>sequence'''
lowercase__ = tokenizer.encode(UpperCamelCase_ )
lowercase__ = encoded.index(UpperCamelCase_ )
lowercase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = tokenizer.encode(UpperCamelCase_ )
lowercase__ = encoded.index(UpperCamelCase_ )
lowercase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = '''A, <mask> AllenNLP sentence.'''
lowercase__ = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
lowercase__ = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__ = f'{text_of_1_token} {text_of_1_token}'
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ), len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
lowercase__ = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
lowercase__ = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ )
lowercase__ = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ), 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
| 93 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ):
    """simple docstring"""
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(f'''{doc_list} has two \'overview\' docs which is not allowed.''' )
    overview_doc.extend(new_doc )
    return overview_doc
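# Illustrative behaviour of clean_doc_toc above (entries invented): given
#   [{"local": "b", "title": "Beta"}, {"local": "a", "title": "Overview"},
#    {"local": "b", "title": "Beta"}]
# the duplicate "b" entries collapse to one, the remainder is sorted by title, and the
# "Overview" entry is pinned to the front of the returned list.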
def check_scheduler_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['''sections''']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def check_pipeline_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['''sections''']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['''section''']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['''section'''] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 93 | 1 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase ( maze ) ->bool:
    """simple docstring"""
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def run_maze( maze , i , j , solutions ) ->bool:
    """simple docstring"""
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
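# Usage sketch for the solver above (grid invented for illustration; 0 = open cell,
# 1 = blocked cell):
#   UpperCamelCase([[0, 1], [0, 0]])  # prints the path matrix and returns True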
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 243 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def UpperCamelCase ( iterations ) ->None:
    """simple docstring"""
    def is_in_circle(x , y ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'''The estimated value of pi is {pi_estimate}''' )
    print(F'''The numpy value of pi is {pi}''' )
    print(F'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator( iterations , function_to_integrate , min_value = 0.0 , max_value = 1.0 , ) ->float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def UpperCamelCase ( iterations , min_value = 0.0 , max_value = 1.0 ) ->None:
    """simple docstring"""
    def identity_function(x ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
    print(F'''Estimated value is {estimated_value}''' )
    print(F'''Expected value is {expected_value}''' )
    print(F'''Total error is {abs(estimated_value - expected_value )}''' )
    print("******************" )
def UpperCamelCase ( iterations ) ->None:
    """simple docstring"""
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F'''Estimated value is {estimated_value}''' )
    print(F'''Expected value is {pi}''' )
    print(F'''Total error is {abs(estimated_value - pi )}''' )
    print("******************" )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 243 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class _lowercase ( __a ):
"""simple docstring"""
def __init__( self : str , *UpperCamelCase__ : str , **UpperCamelCase__ : Any ) -> None:
'''simple docstring'''
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 85 | """simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
FILE_PATH = '''file'''
@pytest.fixture(scope='''session''' )
def zstd_path(tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with zstd.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    """simple docstring"""
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , '''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase ={'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__UpperCamelCase =input_paths[compression_format]
__UpperCamelCase =tmp_path / '''cache'''
__UpperCamelCase =DownloadConfig(cache_dir=__UpperCamelCase , extract_compressed_file=__UpperCamelCase )
__UpperCamelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase )
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.read()
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowerCAmelCase (__UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
"""simple docstring"""
__UpperCamelCase ='''custom_cache'''
__UpperCamelCase ='''custom_extracted_dir'''
__UpperCamelCase =tmp_path / '''custom_extracted_path'''
if default_extracted:
__UpperCamelCase =('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , __UpperCamelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__UpperCamelCase ) )
__UpperCamelCase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__UpperCamelCase =xz_file
__UpperCamelCase =(
DownloadConfig(extract_compressed_file=__UpperCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__UpperCamelCase )
)
__UpperCamelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase )
assert Path(__UpperCamelCase ).parent.parts[-2:] == expected
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =str(Path(__UpperCamelCase ).resolve() )
assert cached_path(__UpperCamelCase ) == text_file
# relative path
__UpperCamelCase =str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__UpperCamelCase ) == text_file
def lowerCAmelCase (__UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(__UpperCamelCase ):
cached_path(__UpperCamelCase )
# relative path
__UpperCamelCase ='''./__missing_file__.txt'''
with pytest.raises(__UpperCamelCase ):
cached_path(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Any ):
"""simple docstring"""
__UpperCamelCase =get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__UpperCamelCase ):
http_get('''https://huggingface.co''' , temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : List[str] ):
"""simple docstring"""
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__UpperCamelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__UpperCamelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
fsspec_head('''s3://huggingface.co''' )
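# A minimal, self-contained sketch (not part of the tests above) of the
# pattern the offline tests rely on: `unittest.mock.patch` temporarily
# rebinds a module-level flag so code under test behaves as if offline.
# All names below are illustrative stand-ins.
from unittest.mock import patch

class _cfg:
    OFFLINE = False

def _fetch(url):
    if _cfg.OFFLINE:
        raise ConnectionError(f"offline mode, cannot reach {url}")
    return "ok"

with patch.object(_cfg, "OFFLINE", True):
    try:
        _fetch("https://huggingface.co")
    except ConnectionError as err:
        print(err)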
| 85 | 1 |
'''simple docstring'''
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ) -> None:
_lowerCAmelCase = data
_lowerCAmelCase = None
_lowerCAmelCase = None
def __a(SCREAMING_SNAKE_CASE_ : Node | None ): # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __a(SCREAMING_SNAKE_CASE_ : Node | None ):
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def __a(SCREAMING_SNAKE_CASE_ : Node ):
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
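# Quick counterexample (illustrative) for is_full_binary_tree above: a node
# with exactly one child is not "full".
#   lopsided = Node(1); lopsided.left = Node(2)
#   is_full_binary_tree(lopsided)  # -> False: left is set, right is missing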
def __a(): # Main function for testing.
'''simple docstring'''
_lowerCAmelCase = Node(1 )
_lowerCAmelCase = Node(2 )
_lowerCAmelCase = Node(3 )
_lowerCAmelCase = Node(4 )
_lowerCAmelCase = Node(5 )
_lowerCAmelCase = Node(6 )
_lowerCAmelCase = Node(7 )
_lowerCAmelCase = Node(8 )
_lowerCAmelCase = Node(9 )
print(is_full_binary_tree(SCREAMING_SNAKE_CASE_ ) )
print(depth_of_tree(SCREAMING_SNAKE_CASE_ ) )
print("Tree is: " )
display(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 158 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
while a != 0:
_lowerCAmelCase , _lowerCAmelCase = b % a, a
return b
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) != 1:
_lowerCAmelCase = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1, 0, a
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0, 1, m
while va != 0:
_lowerCAmelCase = ua // va
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
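# The identically renamed `ua`/`va` assignments above obscure the extended
# Euclidean update (they appear to be u1, u2, u3 / v1, v2, v3 collapsed into
# single names by the obfuscation). A readable restatement with distinct
# names, as a sketch rather than a drop-in replacement:
def mod_inverse_sketch(a, m):
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3
    if u3 != 1:  # gcd(a, m) must be 1 for an inverse to exist
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    return u1 % m

assert mod_inverse_sketch(3, 11) == 4  # 3 * 4 = 12 ≡ 1 (mod 11)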
| 158 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = ["image_processor", "tokenizer"]
__A = "ViTImageProcessor"
__A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase_ , )
a = kwargs.pop("feature_extractor" )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __call__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
a = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if visual_prompt is not None:
a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if images is not None:
a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if visual_prompt is not None and images is not None:
a = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
a = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def UpperCamelCase_ (self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase_ (self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCamelCase_ , )
return self.image_processor_class
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCamelCase_ , )
return self.image_processor
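# A self-contained toy of the dispatch in __call__ above, with stand-in
# outputs instead of a real tokenizer / image processor (the real method
# returns BatchEncoding objects):
def dispatch_sketch(text=None, visual_prompt=None, images=None):
    if text is None and visual_prompt is None and images is None:
        raise ValueError("You have to specify either text, visual prompt or images.")
    if text is not None and visual_prompt is not None:
        raise ValueError("You have to specify exactly one type of prompt.")
    out = {}
    if text is not None:
        out["input_ids"] = [[101, 102]]                # stand-in tokenizer output
    if visual_prompt is not None:
        out["conditional_pixel_values"] = "prompt-px"  # stand-in image features
    if images is not None:
        out["pixel_values"] = "image-px"
    return out

print(dispatch_sketch(text="a cat", images="img"))  # ids + pixel_values
print(dispatch_sketch(visual_prompt="img"))         # conditional pixels only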
| 71 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A = False
__A = False
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
a = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=32 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
a = embedding_size
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertModel(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
a = [input_ids, input_mask]
a = model(lowerCamelCase_ )
a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForMaskedLM(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForNextSentencePrediction(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForPreTraining(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = TFMobileBertForSequenceClassification(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_choices
a = TFMobileBertForMultipleChoice(config=lowerCamelCase_ )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
a = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.num_labels
a = TFMobileBertForTokenClassification(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = TFMobileBertForQuestionAnswering(config=lowerCamelCase_ )
a = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.prepare_config_and_inputs()
( a , a , a , a , a , a , a ) = config_and_inputs
a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFMobileBertModelTest.TFMobileBertModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
a = TFMobileBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
a = tf.constant([[0, 1, 2, 3, 4, 5]] )
a = model(lowerCamelCase_ )[0]
a = [1, 6, 30522]
self.assertEqual(output.shape , lowerCamelCase_ )
a = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 )
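# Hedged usage sketch mirroring the integration test above (commented out:
# needs TensorFlow and Hub access):
# from transformers import AutoTokenizer, TFMobileBertForPreTraining
# tok = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
# model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
# out = model(**tok("hello world", return_tensors="tf"))
# out.prediction_logits.shape  # (1, seq_len, 30522), as asserted above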
| 71 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
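# A minimal sketch of the deferred-import idea behind _LazyModule above:
# with PEP 562 module-level __getattr__, the heavy import happens on first
# attribute access instead of at import time. Names here are illustrative.
import importlib

_LAZY = {"sqrt": "math"}  # public name -> module that actually provides it

def __getattr__(name):  # effective only at module top level
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(name)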
| 283 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : str = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class a ( _lowerCamelCase ):
snake_case_ = "big_bird"
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=5_0358 , lowercase_ : Tuple=768 , lowercase_ : Dict=12 , lowercase_ : str=12 , lowercase_ : Tuple=3072 , lowercase_ : Any="gelu_new" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=4096 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[int]=1e-12 , lowercase_ : Tuple=True , lowercase_ : Tuple=0 , lowercase_ : str=1 , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=66 , lowercase_ : Optional[int]="block_sparse" , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : Any=64 , lowercase_ : Tuple=3 , lowercase_ : Tuple=None , **lowercase_ : Tuple , ):
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = rescale_embeddings
snake_case_ = attention_type
snake_case_ = use_bias
snake_case_ = block_size
snake_case_ = num_random_blocks
snake_case_ = classifier_dropout
class a ( _lowerCamelCase ):
@property
def A_ ( self : str ):
if self.task == "multiple-choice":
snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
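# Hedged usage sketch (in the un-obfuscated library these classes are
# transformers.BigBirdConfig / BigBirdOnnxConfig; values below are arbitrary):
# cfg = BigBirdConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2,
#                     num_attention_heads=2, attention_type="block_sparse")
# cfg.attention_type, cfg.block_size  # ('block_sparse', 64)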
| 56 | 0 |
'''simple docstring'''
from math import sqrt
def a_ ( lowerCamelCase : int ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
lowerCAmelCase = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowerCAmelCase = False
break
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'status' must been from type bool"
return status
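# A self-contained restatement of the trial-division primality test above
# (the def was renamed to `a_` by the obfuscation, while later call sites in
# this snippet keep the original name is_prime):
from math import sqrt as _sqrt

def is_prime_sketch(n):
    if n <= 1:
        return False
    for d in range(2, int(round(_sqrt(n))) + 1):
        if n % d == 0:
            return False
    return True

assert [k for k in range(20) if is_prime_sketch(k)] == [2, 3, 5, 7, 11, 13, 17, 19]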
def a_ ( lowerCamelCase : Any ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase = list(range(2 , n + 1 ) )
lowerCAmelCase = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(lowerCamelCase ) ):
for j in range(i + 1 , len(lowerCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase = 0
# filters actual prime numbers.
lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def a_ ( lowerCamelCase : Union[str, Any] ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime, append it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase ):
ans.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def a_ ( lowerCamelCase : Union[str, Any] ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase = [] # this list will be returned by the function.
# potential prime number factors.
lowerCAmelCase = 2
lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(lowerCamelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase ):
while quotient != 1:
if is_prime(lowerCamelCase ) and (quotient % factor == 0):
ans.append(lowerCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def a_ ( lowerCamelCase : Optional[int] ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase = 0
# prime factorization of 'number'
lowerCAmelCase = prime_factorization(lowerCamelCase )
lowerCAmelCase = max(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type int"
return ans
def a_ ( lowerCamelCase : List[Any] ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase = 0
# prime factorization of 'number'
lowerCAmelCase = prime_factorization(lowerCamelCase )
lowerCAmelCase = min(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type int"
return ans
def a_ ( lowerCamelCase : Dict ):
assert isinstance(lowerCamelCase , lowerCamelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase ), "compare must been from type bool"
return number % 2 == 0
def a_ ( lowerCamelCase : int ):
assert isinstance(lowerCamelCase , lowerCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase ), "compare must been from type bool"
return number % 2 != 0
def a_ ( lowerCamelCase : Union[str, Any] ):
assert (
isinstance(lowerCamelCase , lowerCamelCase ) and (number > 2) and is_even(lowerCamelCase )
), "'number' must been an int, even and > 2"
lowerCAmelCase = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase = get_prime_numbers(lowerCamelCase )
lowerCAmelCase = len(lowerCamelCase )
# run variable for while-loops.
lowerCAmelCase = 0
lowerCAmelCase = None
# exit variable, used to break out of the loops
lowerCAmelCase = True
while i < len_pn and loop:
lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (len(lowerCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
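# A compact restatement of the first-hit Goldbach search above, mirroring its
# strictly increasing pair scan (j starts at i + 1). Assumes is_prime_sketch
# from the earlier sketch is in scope.
def goldbach_sketch(n):
    primes = [p for p in range(2, n + 1) if is_prime_sketch(p)]
    for i, p in enumerate(primes):
        for q in primes[i + 1:]:
            if p + q == n:
                return [p, q]
    return []

assert goldbach_sketch(28) == [5, 23]  # 2+26 and 3+25 fail: 26, 25 not prime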
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple ):
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase = 0
while numbera != 0:
lowerCAmelCase = numbera % numbera
lowerCAmelCase = numbera
lowerCAmelCase = rest
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] ):
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase = prime_factorization(lowerCamelCase )
lowerCAmelCase = prime_factorization(lowerCamelCase )
elif numbera == 1 or numbera == 1:
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = max(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase = prime_fac_a.count(lowerCamelCase )
lowerCAmelCase = prime_fac_a.count(lowerCamelCase )
for _ in range(max(lowerCamelCase , lowerCamelCase ) ):
ans *= n
else:
lowerCAmelCase = prime_fac_a.count(lowerCamelCase )
for _ in range(lowerCamelCase ):
ans *= n
done.append(lowerCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase = prime_fac_a.count(lowerCamelCase )
for _ in range(lowerCamelCase ):
ans *= n
done.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
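# The function above builds lcm(a, b) from prime factorizations; the classic
# identity lcm(a, b) = a * b // gcd(a, b) gives the same value and makes a
# handy cross-check:
from math import gcd as _math_gcd

assert (4 * 6) // _math_gcd(4, 6) == 12    # lcm(4, 6)
assert (21 * 6) // _math_gcd(21, 6) == 42  # lcm(21, 6)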
def a_ ( lowerCamelCase : Any ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase = 0
lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans is not prime then
# advance to the next prime number.
while not is_prime(lowerCamelCase ):
ans += 1
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and is_prime(
lowerCamelCase ), "'ans' must been a prime number and from type int"
return ans
def a_ ( lowerCamelCase : str , lowerCamelCase : List[Any] ):
assert (
is_prime(lowerCamelCase ) and is_prime(lowerCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase = p_number_a + 1 # jump to the next number
lowerCAmelCase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and ans[0] != p_number_a
and ans[len(lowerCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def a_ ( lowerCamelCase : List[str] ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a_ ( lowerCamelCase : Any ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase = get_divisors(lowerCamelCase )
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ):
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase = gcd(abs(lowerCamelCase ) , abs(lowerCamelCase ) )
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a_ ( lowerCamelCase : Dict ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a_ ( lowerCamelCase : Dict ):
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 1 # this will be returned
for _ in range(n - 1 ):
lowerCAmelCase = ans
ans += fiba
lowerCAmelCase = tmp
return ans
| 55 |
'''simple docstring'''
import math
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCamelCase )
def a_ ( lowerCamelCase : float = 1 / 12345 ):
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 3
while True:
lowerCAmelCase = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCamelCase ):
lowerCAmelCase = int(lowerCamelCase )
total_partitions += 1
if check_partition_perfect(lowerCamelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCamelCase )
integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
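# Worked check for check_partition_perfect above (assuming math.loga is the
# dataset's renaming of math.log2, consistent with intaa/fpaa elsewhere):
# it returns True exactly when sqrt(4n + 1)/2 + 1/2 is a power of two,
# i.e. when n = 2**k * (2**k - 1).
#   n = 2 :  sqrt(9)/2  + 1/2 = 2.0 = 2**1  -> True
#   n = 12:  sqrt(49)/2 + 1/2 = 4.0 = 2**2  -> True
#   n = 3 :  sqrt(13)/2 + 1/2 ≈ 2.303       -> False (log2 ≈ 1.203)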
| 55 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : List[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = '''focalnet'''
def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : int = patch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : List[str] = use_conv_embed
lowerCAmelCase__ : List[Any] = hidden_sizes
lowerCAmelCase__ : Dict = depths
lowerCAmelCase__ : List[str] = focal_levels
lowerCAmelCase__ : List[str] = focal_windows
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Dict = mlp_ratio
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Tuple = drop_path_rate
lowerCAmelCase__ : Dict = use_layerscale
lowerCAmelCase__ : Optional[Any] = layerscale_value
lowerCAmelCase__ : str = use_post_layernorm
lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation
lowerCAmelCase__ : int = normalize_modulator
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : List[Any] = encoder_stride
lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
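# Illustrative stand-in for the alignment done in the last two lines above
# (not the transformers helper itself): requested out_features are mapped to
# their positions in stage_names.
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
out_features = ["stage2", "stage4"]
out_indices = [stage_names.index(name) for name in out_features]
print(list(zip(out_features, out_indices)))  # [('stage2', 2), ('stage4', 4)]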
| 37 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 278 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__A = logging.getLogger(__name__)
@dataclass
class lowercase_ :
UpperCamelCase_ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCamelCase_ : Optional[str] = field(
default=__lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCamelCase_ : bool = field(default=__lowercase , metadata={"help": "Whether tp freeze the encoder."} )
UpperCamelCase_ : bool = field(default=__lowercase , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class lowercase_ :
UpperCamelCase_ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCamelCase_ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
UpperCamelCase_ : Optional[int] = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=1_2_8 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCamelCase_ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
UpperCamelCase_ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
UpperCamelCase_ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
UpperCamelCase_ : Optional[str] = field(default=__lowercase , metadata={"help": "Source language id for translation."} )
UpperCamelCase_ : Optional[str] = field(default=__lowercase , metadata={"help": "Target language id for translation."} )
UpperCamelCase_ : Optional[int] = field(default=__lowercase , metadata={"help": "# num_beams to use for evaluation."} )
UpperCamelCase_ : bool = field(
default=__lowercase , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , F"""{split}_results.json""" ) )
def snake_case_() -> List[Any]:
"""simple docstring"""
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case, _snake_case, _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case, _snake_case, _snake_case = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
assert hasattr(_UpperCamelCase , _UpperCamelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
_snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=_UpperCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_snake_case = SeqaSeqDataset
# Get datasets
_snake_case = (
dataset_class(
_UpperCamelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_snake_case = (
dataset_class(
_UpperCamelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_snake_case = (
dataset_class(
_UpperCamelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_snake_case = (
build_compute_metrics_fn(data_args.task , _UpperCamelCase ) if training_args.predict_with_generate else None
)
_snake_case = SeqaSeqTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , data_args=_UpperCamelCase , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , data_collator=SeqaSeqDataCollator(
_UpperCamelCase , _UpperCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
_snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_snake_case = train_result.metrics
_snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_snake_case = trainer.evaluate(metric_key_prefix='''val''' )
_snake_case = data_args.n_val
_snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_snake_case = trainer.predict(test_dataset=_UpperCamelCase , metric_key_prefix='''test''' )
_snake_case = test_output.metrics
_snake_case = data_args.n_test
if trainer.is_world_process_zero():
_snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.predict_with_generate:
_snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
_snake_case = lmap(str.strip , _UpperCamelCase )
write_txt_file(_UpperCamelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(_UpperCamelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def snake_case_(_UpperCamelCase ) -> List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
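# Hedged invocation sketch. The dataclass fields above were renamed by the
# dataset's obfuscation (several now share one name), so this assumes the
# original field names and that the file is saved as finetune_trainer.py:
# python finetune_trainer.py \
#   --model_name_or_path t5-small --data_dir ./wmt_en_ro --output_dir ./out \
#   --do_train --do_eval --predict_with_generate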
| 278 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
_lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert("""RGB""" )
_lowerCAmelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
_lowerCAmelCase = transform(snake_case ).unsqueeze(0 ).to(snake_case )
return image
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
if "visual_encoder" in key:
_lowerCAmelCase = re.sub("""visual_encoder*""" , """vision_model.encoder""" , snake_case )
if "blocks" in key:
_lowerCAmelCase = re.sub(R"""blocks""" , """layers""" , snake_case )
if "attn" in key:
_lowerCAmelCase = re.sub(R"""attn""" , """self_attn""" , snake_case )
if "norm1" in key:
_lowerCAmelCase = re.sub(R"""norm1""" , """layer_norm1""" , snake_case )
if "norm2" in key:
_lowerCAmelCase = re.sub(R"""norm2""" , """layer_norm2""" , snake_case )
if "encoder.norm" in key:
_lowerCAmelCase = re.sub(R"""encoder.norm""" , """post_layernorm""" , snake_case )
if "encoder.patch_embed.proj" in key:
_lowerCAmelCase = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , snake_case )
if "encoder.pos_embed" in key:
_lowerCAmelCase = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , snake_case )
if "encoder.cls_token" in key:
_lowerCAmelCase = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , snake_case )
if "self_attn" in key:
_lowerCAmelCase = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , snake_case )
return key
@torch.no_grad()
def _UpperCAmelCase ( snake_case , snake_case=None ):
"""simple docstring"""
if config_path is not None:
_lowerCAmelCase = BlipConfig.from_pretrained(snake_case )
else:
_lowerCAmelCase = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
_lowerCAmelCase = BlipForConditionalGeneration(snake_case ).eval()
_lowerCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
_lowerCAmelCase = blip_decoder(pretrained=snake_case , image_size=3_84 , vit="""base""" )
_lowerCAmelCase = pt_model.eval()
_lowerCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_lowerCAmelCase = modified_state_dict.pop(snake_case )
_lowerCAmelCase = rename_key(snake_case )
_lowerCAmelCase = value
hf_model.load_state_dict(snake_case )
_lowerCAmelCase = 3_84
_lowerCAmelCase = load_demo_image(image_size=snake_case , device="""cpu""" )
_lowerCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_lowerCAmelCase = tokenizer(["""a picture of"""] ).input_ids
_lowerCAmelCase = hf_model.generate(snake_case , snake_case )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_lowerCAmelCase = hf_model.generate(snake_case )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_lowerCAmelCase = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
_lowerCAmelCase = blip_vqa(pretrained=snake_case , image_size=snake_case , vit="""base""" )
vqa_model.eval()
_lowerCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_lowerCAmelCase = modified_state_dict.pop(snake_case )
_lowerCAmelCase = rename_key(snake_case )
_lowerCAmelCase = value
_lowerCAmelCase = BlipForQuestionAnswering(snake_case )
hf_vqa_model.load_state_dict(snake_case )
_lowerCAmelCase = ["""How many dogs are in this image?"""]
_lowerCAmelCase = tokenizer(snake_case , return_tensors="""pt""" ).input_ids
_lowerCAmelCase = hf_vqa_model.generate(snake_case , snake_case )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
_lowerCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
_lowerCAmelCase = blip_itm(pretrained=snake_case , image_size=snake_case , vit="""base""" )
itm_model.eval()
_lowerCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_lowerCAmelCase = modified_state_dict.pop(snake_case )
_lowerCAmelCase = rename_key(snake_case )
_lowerCAmelCase = value
_lowerCAmelCase = BlipForImageTextRetrieval(snake_case )
_lowerCAmelCase = ["""A picture of a woman with a dog sitting in a beach"""]
_lowerCAmelCase = tokenizer(
snake_case , return_tensors="""pt""" , padding="""max_length""" , truncation=snake_case , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case )
hf_itm_model.eval()
_lowerCAmelCase = hf_itm_model(snake_case , snake_case , use_itm_head=snake_case )
_lowerCAmelCase = hf_itm_model(snake_case , snake_case , use_itm_head=snake_case )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
A__ = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
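# Hedged invocation sketch (assuming this script is saved as
# convert_blip_original_pytorch_to_hf.py; both flags are defined above):
# python convert_blip_original_pytorch_to_hf.py \
#   --pytorch_dump_folder_path ./blip-out --config_path ./blip_config.json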
| 82 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
A : Tuple = None
A : Optional[Any] = logging.get_logger(__name__)
A : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A : List[str] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
A : List[str] = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
A : Optional[int] = '''▁'''
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[str] = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 274 | 0 |
def solution():
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
        for b in range(a , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"{solution() = }")
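    # Sanity check: the unique Pythagorean triplet with a + b + c = 1_000 is
    # (200, 375, 425), so solution() evaluates to 200 * 375 * 425 == 31_875_000.
    assert 200 + 375 + 425 == 1_000 and 200**2 + 375**2 == 425**2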
| 305 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE_ = BlipaProcessor(__magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self : str , **__magic_name__ : int ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def __A ( self : Dict , **__magic_name__ : List[Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def __A ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __A ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(__magic_name__ , return_tensors="np" )
SCREAMING_SNAKE_CASE_ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __A ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = BlipaProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
SCREAMING_SNAKE_CASE_ = "lower newer"
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=__magic_name__ , images=__magic_name__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 305 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''data2vec-text'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def snake_case_( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 58 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 58 | 1 |
import os
def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
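# Self-contained illustration of the same bottom-up update on the classic
# 4-row triangle; the helper mirrors the loop above and exists only as a demo.
def _demo_max_path(triangle):
    rows = [row[:] for row in triangle]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            above = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            above_left = rows[i - 1][j - 1] if j > 0 else 0
            rows[i][j] += max(above, above_left)
    return max(rows[-1])

assert _demo_max_path([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # path 3 -> 7 -> 4 -> 9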
if __name__ == "__main__":
    print(solution())
| 210 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='gelu' , attention_bias=True )
                for _ in range(num_layers )
            ] )
    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
| 210 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
_UpperCAmelCase : List[str] = dict(zip(_A , range(len(_A ) ) ) )
_UpperCAmelCase : List[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
_UpperCAmelCase : Dict = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : int = os.path.join(self.tmpdirname , _A )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_A ) + """\n""" )
# load decoder from hub
_UpperCAmelCase : List[Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def __snake_case ( self , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str = self.add_kwargs_tokens_map.copy()
kwargs.update(_A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Tuple:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )
def __snake_case ( self , **_A ) -> Union[str, Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_A , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = feature_extractor(_A , return_tensors="""np""" )
_UpperCAmelCase : int = processor(_A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[Any] = """This is a test string"""
_UpperCAmelCase : Optional[Any] = processor(text=_A )
_UpperCAmelCase : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        '''simple docstring'''
        np.random.seed(seed )
        return np.random.rand(*shape )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : str = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCAmelCase : Optional[int] = processor.decode(_A )
_UpperCAmelCase : str = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def __snake_case ( self , pool_context ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase : int = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
_UpperCAmelCase : Dict = processor.batch_decode(_A , _A )
_UpperCAmelCase : Tuple = list(_A )
with get_context("""fork""" ).Pool() as p:
_UpperCAmelCase : Tuple = decoder.decode_beams_batch(_A , _A )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_decoder()
_UpperCAmelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : List[str] = 15
_UpperCAmelCase : Dict = -20.0
_UpperCAmelCase : List[str] = -4.0
_UpperCAmelCase : Any = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Any = decoded_processor_out.text
_UpperCAmelCase : Any = list(_A )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : str = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
_UpperCAmelCase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase : List[str] = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1e-3 ) )
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
_UpperCAmelCase : Optional[int] = self._get_dummy_logits()
_UpperCAmelCase : Any = 2.0
_UpperCAmelCase : Union[str, Any] = 5.0
_UpperCAmelCase : List[Any] = -20.0
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
_UpperCAmelCase : Tuple = decoded_processor_out.text
_UpperCAmelCase : Tuple = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context("""fork""" ).Pool() as pool:
_UpperCAmelCase : Optional[Any] = decoder.decode_beams_batch(
_A , _A , )
_UpperCAmelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _A )
_UpperCAmelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _A )
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_A )
_UpperCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase : Dict = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_UpperCAmelCase : Optional[Any] = os.listdir(_A )
_UpperCAmelCase : Union[str, Any] = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def __snake_case ( self ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = floats_list((3, 10_00) )
_UpperCAmelCase : str = processor_wavaveca(_A , return_tensors="""np""" )
_UpperCAmelCase : Any = processor_auto(_A , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_UpperCAmelCase : Union[str, Any] = self._get_dummy_logits()
_UpperCAmelCase : Dict = processor_wavaveca.batch_decode(_A )
_UpperCAmelCase : Optional[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
    def get_from_offsets( offsets , key ):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : List[Any] = self._get_dummy_logits()[0]
_UpperCAmelCase : Tuple = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_A , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ) -> str:
'''simple docstring'''
import torch
_UpperCAmelCase : List[str] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_A )
_UpperCAmelCase : List[Any] = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_UpperCAmelCase : List[Any] = iter(_A )
_UpperCAmelCase : Optional[Any] = next(_A )
_UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_UpperCAmelCase : Dict = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(_A ).logits.cpu().numpy()
_UpperCAmelCase : Union[str, Any] = processor.decode(logits[0] , output_word_offsets=_A )
_UpperCAmelCase : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase : Optional[int] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_UpperCAmelCase : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , _A )
self.assertEqual(""" """.join(self.get_from_offsets(_A , """word""" ) ) , output.text )
# output times
_UpperCAmelCase : List[str] = torch.tensor(self.get_from_offsets(_A , """start_time""" ) )
_UpperCAmelCase : Any = torch.tensor(self.get_from_offsets(_A , """end_time""" ) )
# fmt: off
_UpperCAmelCase : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
| 246 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
a : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset( Dataset ):
        features: List[InputFeatures]
        def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = None , overwrite_cache : bool = False , evaluate : bool = False , ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , '''cached_{}_{}_{}_{}'''.format(
                    '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F"Loading features from cached file {cached_features_file}" )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F"Creating features from dataset file at {data_dir}" )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info('''Training examples: %s''' , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info('''Saving features into cached file %s''' , cached_features_file )
                    torch.save(self.features , cached_features_file )
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels( self ):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
        def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : int = 128 , overwrite_cache : bool = False , evaluate : bool = False , ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
        def get_dataset( self ):
            return self.dataset
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels( self ):
            return self.label_list
class HansProcessor( DataProcessor ):
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_train_set.txt''' ) ) , '''train''' )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '''%s-%s''' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples, label_list, max_length, tokenizer, ) -> List[InputFeatures]:
    '''simple docstring'''
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ), desc='''convert examples to features''' ):
        if ex_index % 1_0000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding='''max_length''', truncation=True, return_overflowing_tokens=True, )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs, label=label, pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info('''*** Example ***''' )
        logger.info(F"guid: {example}" )
        logger.info(F"features: {features[i]}" )
    return features
hans_tasks_num_labels = {
    """hans""": 3,
}
hans_processors = {
    """hans""": HansProcessor,
}
| 360 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class a ( _lowerCamelCase ):
snake_case_ = "efficientnet"
    def __init__( self , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 2560 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class a ( _lowerCamelCase ):
snake_case_ = version.parse("1.11" )
@property
def A_ ( self : Optional[int] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A_ ( self : List[str] ):
return 1e-5
| 72 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : str = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_( snake_case : list[int] ):
'''simple docstring'''
return len(set(snake_case ) ) == len(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
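    # Quick illustration of the helper above: it returns True only when every
    # element of the list is distinct.
    assert UpperCamelCase_([1, 2, 3])
    assert not UpperCamelCase_([1, 2, 2])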
| 85 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__snake_case :List[str] = logging.get_logger(__name__)
def shape_list( tensor ):
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
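# Illustration (eager mode): with a concrete tensor every entry is a static
# Python int; under tf.function, axes of unknown size come back as scalar
# tensors from tf.shape instead.
assert shape_list(tf.zeros((2, 3))) == [2, 3]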
def stable_softmax( logits , axis = None , name = None ):
    # The tiny shift keeps the op from being folded into a numerically unstable form.
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def functional_layernorm( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
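# Illustration: with unit weight and zero bias the output is standardized along
# the last axis, so its overall mean is approximately zero.
_x = tf.random.normal((2, 4))
_y = functional_layernorm(_x, tf.ones((4,)), tf.zeros((4,)))
assert abs(float(tf.reduce_mean(_y))) < 1e-3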
def flatten( input , start_dim=0 , end_dim=-1 ):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
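# Illustration: mirrors torch.flatten semantics, here collapsing dims 1..-1.
assert shape_list(flatten(tf.reshape(tf.range(24), (2, 3, 4)), start_dim=1)) == [2, 12]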
def invert_attention_mask( encoder_attention_mask ):
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds( tensor , embed_dim , tensor_name = "input_ids" ):
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f'The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '
            f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
        ) , )
def save_attributes_to_hdf5_group( group , name , data ):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group( group , name ):
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d( data ):
    def _expand_single_1d_tensor( t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
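# Illustration: 1D tensors gain a trailing axis so labels broadcast against
# (batch, 1) outputs; non-tensor leaves pass through untouched.
assert shape_list(expand_1d({"labels": tf.constant([1, 0, 1])})["labels"]) == [3, 1]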
| 131 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case :Dict = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['''prompt'''] = prompt
        if generate_kwargs is not None:
            forward_kwargs['''generate_kwargs'''] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['''generate_kwargs'''] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''')
            forward_kwargs['''generate_kwargs''']['''max_new_tokens'''] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        '''simple docstring'''
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F'Received an invalid text input, got - {type(prompt )} - but expected a single string. '
                    '''Note also that one single text can be provided for conditional image to text generation.''')
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework)
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F'Model type {model_type} does not support conditional text generation')
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['''input_ids'''] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        '''simple docstring'''
        # The git model sets `input_ids = None` in `preprocess` when no prompt is given; batching then produces a
        # list of `None`s which would break `generate`, so normalize it back to a single `None` first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''] , list)
            and all(x is None for x in model_inputs['''input_ids'''])
        ):
            model_inputs['''input_ids'''] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 131 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path , map_location = "cpu" , save_path = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
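# Usage sketch (the script name below is illustrative): fire maps the function
# signature straight onto a CLI, so fp16 conversion is a one-liner:
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin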
| 71 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
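# Outside the test suite, the same registration flow looks roughly like this sketch
# (`MyConfig` and `TFMyModel` are hypothetical user-defined classes, not from this file):
#
# from transformers import AutoConfig, TFAutoModel
#
# AutoConfig.register("my-model", MyConfig)    # map the model_type string to the config class
# TFAutoModel.register(MyConfig, TFMyModel)    # map the config class to the TF model class
# model = TFAutoModel.from_config(MyConfig())  # the auto-API now resolves the custom classes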
| 71 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
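# Example invocation of this converter (a sketch; the script name and both paths are
# placeholders, not values from this file):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_checkpoint --output ./gptsan_model.pt
#
# The TF directory is expected to contain `parameters.json` plus checkpoint files
# readable by `tf.train.load_checkpoint`.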
| 364 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321 | 0 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
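# Minimal usage sketch (not part of the original module): wrapping long-running console
# output in the context manager guarantees the cursor is restored even on error.
#
# import time
#
# with hide():
#     time.sleep(2)  # cursor is hidden while this runs
# # cursor is visible again here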
| 55 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that byte-level BPE cannot handle directly.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
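# Quick usage sketch (assumes the pretrained files referenced in the maps above can be
# downloaded; byte-level BPE round-trips text losslessly):
#
# tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
# ids = tokenizer("Hello world")["input_ids"]
# assert tokenizer.decode(ids, skip_special_tokens=True) == "Hello world"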
| 366 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 165 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
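# With the lazy module installed in sys.modules, importing from this package stays cheap
# until a symbol is actually accessed, e.g. (sketch):
#
# from transformers.models.mobilevit import MobileViTConfig  # resolved on first access
# config = MobileViTConfig()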
| 278 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
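# Outside the test harness, a bare denoising loop with this scheduler looks roughly like
# the sketch below (`model` stands for any noise-prediction network; the tensor shape is
# an illustrative assumption):
#
# scheduler = UnCLIPScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(25)
# sample = torch.randn(1, 3, 64, 64)
# for t in scheduler.timesteps:
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample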
| 278 | 1 |
'''simple docstring'''
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 354 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
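# Worked examples: 217 -> 2*1*7 = 14 -> 1*4 = 4, so multiplicative_persistence(217) == 2;
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, so additive_persistence(199) == 3.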
| 3 | 0 |
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n by n number spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
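# Sanity check: for a 5x5 spiral the diagonals are 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101,
# so solution(5) returns 101.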
| 108 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
print("Absolute difference is:" , snake_case__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
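# Example invocation (a sketch; the script name and both paths are placeholders):
#
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch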
| 298 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
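# Lazy loading in action (an illustrative addition; the import path assumes
# this file lives at transformers/models/deberta/__init__.py):
#
#   from transformers.models.deberta import DebertaModel  # resolved on first access via _LazyModule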
| 26 |
| 26 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=1_0 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=1_0 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        """simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        """simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
"""simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        """simple docstring"""
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"""failed for {scheduler_func} in save and reload""" )
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self , fn ):
        """simple docstring"""
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
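# A brief note on the wrapper above (an addition for clarity, not part of the
# original test file): LambdaLR schedules hold raw Python lambdas, which the
# pickle module cannot serialize. Replacing each lambda with a plain callable
# class instance keeps the schedule behaviour identical while letting
# torch.save(scheduler.state_dict()) round-trip, which is exactly what
# unwrap_and_save_reload_schedule exercises. A minimal sketch:
#
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   LambdaScheduleWrapper.wrap_scheduler(scheduler)  # lambdas are now picklable class instances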
| 108 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1_004 )
    def test_vocab_size( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def test_rust_and_python_full_tokenizers( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
"""simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
    def test_tokenization_base_easy_symbols( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "Hello World!"
lowerCAmelCase : Any = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
"""simple docstring"""
        txt = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
        ids = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
        self.assertListEqual(ids , self.big_tokenizer.encode(txt ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens( self ):
"""simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
    def test_tokenizer_integration( self ):
"""simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
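# A minimal usage sketch (an illustrative addition, mirroring the slow tests
# above; it requires network access to download the checkpoint):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   assert tokenizer.encode("Hello World!") == [65, 18_536, 2_260, 101, 66]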
| 108 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = """efficientnet"""
    def __init__( self , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 2560 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs , )-> None:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self )-> float:
return 1E-5
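# A minimal usage sketch (an illustrative addition; the values passed are the
# defaults defined above, so the call is equivalent to EfficientNetConfig()):
#
#   config = EfficientNetConfig(width_coefficient=2.0, depth_coefficient=3.1)
#   print(config.num_hidden_layers)  # sum(num_block_repeats) * 4 = 64 with the default repeats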
| 368 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir , metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric , patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        lrs = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True )-> None:
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / '''test_results.txt'''
            generations_file = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , '''a+''' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule )-> None:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , '''test''' )
@rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module )-> None:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 211 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__a = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'altclip_text_model'
    def __init__( self , vocab_size=2_5_0_0_0_2 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=7_6_8 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'altclip_vision_model'
    def __init__( self , hidden_size=7_6_8 , intermediate_size=3_0_7_2 , projection_dim=5_1_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_channels=3 , image_size=2_2_4 , patch_size=3_2 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'altclip'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=7_6_8 , logit_scale_init_value=2.65_92 , **kwargs ) -> None:
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop('''text_config_dict''' , None )
        vision_config_dict = kwargs.pop('''vision_config_dict''' , None )
        super().__init__(**kwargs )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict['''id2label'''] = {
                    str(key ): value for key, value in _vision_config_dict['''id2label'''].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs( cls , text_config: AltCLIPTextConfig , vision_config: AltCLIPVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
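# A hedged composition sketch (an illustrative addition; it mirrors the
# classmethod defined above):
#
#   text_config = AltCLIPTextConfig()
#   vision_config = AltCLIPVisionConfig()
#   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "altclip"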
| 30 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    model_type = "unispeech"
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , mask_feature_min_masks=0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , num_ctc_classes=8_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
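# A quick worked example of the property above (an illustrative addition):
# with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the cumulative
# product is 5 * 2**6 = 320, i.e. one logit frame per 320 input samples.
#
#   config = UniSpeechConfig()
#   assert config.inputs_to_logits_ratio == 320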
| 72 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock( nn.Module ):
    def __init__( self , dim: int , num_attention_heads: int , attention_head_dim: int , dropout=0.0 , cross_attention_dim: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , attention_bias: bool = False , only_cross_attention: bool = False , double_self_attention: bool = False , upcast_attention: bool = False , norm_elementwise_affine: bool = True , norm_type: str = "layer_norm" , final_dropout: bool = False , ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , ) # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward( self , chunk_size: Optional[int] , dim: int ):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward( self , hidden_states: torch.FloatTensor , attention_mask: Optional[torch.FloatTensor] = None , encoder_hidden_states: Optional[torch.FloatTensor] = None , encoder_attention_mask: Optional[torch.FloatTensor] = None , timestep: Optional[torch.LongTensor] = None , cross_attention_kwargs: Dict[str, Any] = None , class_labels: Optional[torch.LongTensor] = None , ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
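# A minimal forward-pass sketch for the block above (an illustrative addition;
# the shapes are arbitrary and chosen only to satisfy the constructor):
#
#   block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#   hidden_states = torch.randn(2, 77, 64)   # (batch, sequence, dim)
#   out = block(hidden_states)               # same shape as the input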
class FeedForward( nn.Module ):
    def __init__( self , dim: int , dim_out: Optional[int] = None , mult: int = 4 , dropout: float = 0.0 , activation_fn: str = "geglu" , final_dropout: bool = False , ):
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate="tanh" )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward( self , hidden_states ):
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class GELU( nn.Module ):
    def __init__( self , dim_in: int , dim_out: int , approximate: str = "none" ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu( self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class GEGLU( nn.Module ):
    def __init__( self , dim_in: int , dim_out: int ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu( self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        hidden_states, gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class ApproximateGELU( nn.Module ):
    def __init__( self , dim_in: int , dim_out: int ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward( self , x ):
        x = self.proj(x )
        return x * torch.sigmoid(1.7_02 * x )
class AdaLayerNorm( nn.Module ):
    def __init__( self , embedding_dim: int , num_embeddings: int ):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward( self , x , timestep ):
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale, shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class AdaLayerNormZero( nn.Module ):
    def __init__( self , embedding_dim: int , num_embeddings: int ):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1E-6 )
    def forward( self , x , timestep , class_labels , hidden_dtype=None ):
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm( nn.Module ):
    def __init__( self , embedding_dim: int , out_dim: int , num_groups: int , act_fn: Optional[str] = None , eps: float = 1E-5 ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward( self , x , emb ):
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x
 | 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : str = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig( PretrainedConfig ):
    model_type = 'canine'
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_6_3_8_4 , type_vocab_size=1_6 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0xE_0_0_0 , eos_token_id=0xE_0_0_1 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=1_6_3_8_4 , local_transformer_stride=1_2_8 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
 | 8 | 0 |
def min_path_sum( grid : list ) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row : list , row_above : list ) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
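# A hedged worked example (an addition; `min_path_sum` is the name given to the
# repaired top-level function above): for the classic 3x3 grid below, the
# cheapest top-left to bottom-right path moving only right/down is
# 1 -> 3 -> 1 -> 1 -> 1, costing 7.
#
#   >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#   7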
| 38 |
'''simple docstring'''
def net_present_value( discount_rate: float , cash_flows: list[float] ):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
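# A hedged worked example (an addition; the function name above was chosen
# during repair): with a 10% discount rate, an outflow of 100 today followed
# by an inflow of 110 in one year nets exactly zero.
#
#   >>> net_present_value(0.1, [-100, 110])
#   0.0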
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 112 | 0 |
def harmonic_series( n_term : str ):
    if n_term == "":
        return []
    series : list = []
    for temp in range(int(n_term ) ):
        series.append(F"1/{temp + 1}" if series else '1' )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
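# A quick worked example (an addition):
#
#   >>> harmonic_series('4')
#   ['1', '1/2', '1/3', '1/4']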
 | 50 |
def is_isogram( string : str ) -> bool:
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 50 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}
class ReformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __a ( self , a , a = None ):
if not os.path.isdir(a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
| 80 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self, data) -> None:
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
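        # (the eight values above are the first 32 bits of the fractional parts
        # of the square roots of the first 8 primes, per FIPS 180-4)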
        # Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
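        # (the 64 values above are the first 32 bits of the fractional parts
        # of the cube roots of the first 64 primes, per FIPS 180-4)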
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data) -> bytes:
        """simple docstring"""
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer
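    # final_hash (below) implements the FIPS 180-4 compression loop: each
    # 64-byte block is expanded into a 64-word message schedule, then mixed
    # into the eight working variables over 64 rounds.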
    def final_hash(self) -> None:
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_00_00_00_00
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_00_00_00_00),
                )
                mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value, rotations) -> int:
        """simple docstring"""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
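# Sanity check: SHAaaa(b"Test String").hash should equal
# hashlib.sha256(b"Test String").hexdigest(), which the unit test below verifies.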
class SHAaaaTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        """simple docstring"""
        import hashlib

        test_string = bytes('Test String', 'utf-8')
        self.assertEqual(SHAaaa(test_string).hash, hashlib.sha256(test_string).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s',
        '--string',
        dest='input_string',
        default='Hello World!! Welcome to Cryptography',
        help='Hash the string',
    )
    parser.add_argument('-f', '--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
| 321 | 0 |
'''simple docstring'''
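# Hamiltonian cycle search via backtracking: grow the path one vertex at a
# time and undo any choice that cannot be extended into a full cycle.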
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
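# Example (an assumed 5-vertex graph; the search returns the first cycle found):
# graph = [[0, 1, 0, 1, 0], [1, 0, 1, 1, 1], [0, 1, 0, 0, 1],
#          [1, 1, 0, 0, 1], [0, 1, 1, 1, 0]]
# hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]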
| 361 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
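# Unit tests for RealmRetriever: build a tiny WordPiece vocab and five dummy
# evidence blocks, then exercise retrieval, answer-span tagging, and the
# save/load round-trip.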
class RealmRetrieverTest(TestCase):
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname, 'realm_block_records')
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        """simple docstring"""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer'))

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        """simple docstring"""
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        """simple docstring"""
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        """simple docstring"""
        block_records = np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        """simple docstring"""
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer()
        )
        return retriever
    def test_retrieve(self):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np'
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'],
        )
    def test_block_has_answer(self):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np'
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        """simple docstring"""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], b'This is the first record')
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
            self.assertEqual(retriever.block_records[0], b'This is the first record')
| 237 | 0 |